code (string, length 13-6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, length 1-5)
---|---|---|---|
#! /usr/bin/env python2
############################################################
# Program is part of PySAR v1.2 #
# Copyright(c) 2015, Heresh Fattahi, Zhang Yunjun #
# Author: Heresh Fattahi, Zhang Yunjun #
############################################################
import os
import sys
import argparse
import re
try:
import pyaps as pa
except ImportError:
sys.exit('Cannot import pyaps into Python!')
import h5py
import numpy as np
import pysar._datetime as ptime
import pysar._pysar_utilities as ut
import pysar._readfile as readfile
import pysar._writefile as writefile
###############################################################
def get_delay(grib_file, atr, inps_dict):
'''Get delay matrix using PyAPS for one acquisition
Inputs:
grib_file - string, grib file path
atr - dict, including the following attributes:
dem_file - string, DEM file path
grib_source - string, Weather re-analysis data source
delay_type - string, comb/dry/wet
ref_y/x - string, reference pixel row/col number
inc_angle - np.array, 0/1/2 D
Output:
phs - 2D np.array, tropospheric phase delay in the LOS direction, referenced to the pixel at ref_y/x
'''
if 'X_FIRST' in atr.keys():
aps = pa.PyAPS_geo(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
else:
aps = pa.PyAPS_rdr(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
phs = np.zeros((aps.ny, aps.nx), dtype=np.float32)
aps.getdelay(phs, inc=0.0)
# Get relative phase delay in space
yref = int(atr['ref_y'])
xref = int(atr['ref_x'])
phs -= phs[yref, xref]
# project into LOS direction
phs /= np.cos(inps_dict['inc_angle'])
# reverse the sign for consistency between different phase correction steps/methods
phs *= -1
return phs
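# Illustrative usage sketch (not part of the original workflow; the file names,
# reference pixel and incidence angle below are made-up placeholders):
#   atr = {'ref_y': '100', 'ref_x': '200'}
#   inps_dict = {'dem_file': 'radar_4rlks.hgt', 'grib_source': 'ECMWF',
#                'delay_type': 'comb', 'inc_angle': 34.0*np.pi/180.0}
#   phs = get_delay('ERA-Int_20150503_12.grb', atr, inps_dict)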
def date_list2grib_file(date_list, hour, grib_source, grib_dir):
grib_file_list = []
for d in date_list:
grib_file = grib_dir+'/'
if grib_source == 'ECMWF' : grib_file += 'ERA-Int_%s_%s.grb' % (d, hour)
elif grib_source == 'ERA' : grib_file += 'ERA_%s_%s.grb' % (d, hour)
elif grib_source == 'NARR' : grib_file += 'narr-a_221_%s_%s00_000.grb' % (d, hour)
elif grib_source == 'MERRA' : grib_file += 'merra-%s-%s.nc4' % (d, hour)
elif grib_source == 'MERRA1': grib_file += 'merra-%s-%s.hdf' % (d, hour)
grib_file_list.append(grib_file)
return grib_file_list
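# For example, with d='20150503' and hour='12' (values illustrative), the branches above
# build 'ERA-Int_20150503_12.grb' for ECMWF, 'narr-a_221_20150503_1200_000.grb' for NARR
# and 'merra-20150503-12.nc4' for MERRA, each prefixed with grib_dir.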
def dload_grib(date_list, hour, grib_source='ECMWF', weather_dir='./'):
'''Download weather re-analysis grib files using PyAPS
Inputs:
date_list : list of string in YYYYMMDD format
hour : string in HH:MM or HH format
grib_source : string,
weather_dir : string,
Output:
grib_file_list : list of string
'''
## Grib data directory
weather_dir = os.path.abspath(weather_dir)
grib_dir = weather_dir+'/'+grib_source
if not os.path.isdir(grib_dir):
print 'making directory: '+grib_dir
os.makedirs(grib_dir)
## Date list to grib file list
grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)
## Get date list to download (skip already downloaded files)
grib_file_existed = ut.get_file_list(grib_file_list)
if grib_file_existed:
grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])
grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])
grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]
print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)
print 'number of existing grib files : %d' % len(grib_file_existed)
if grib_file_corrupted:
print '------------------------------------------------------------------------------'
print 'corrupted grib files detected! Deleting them for re-download ...'
print 'number of grib files corrupted : %d' % len(grib_file_corrupted)
for i in grib_file_corrupted:
rmCmd = 'rm '+i
print rmCmd
os.system(rmCmd)
grib_file_existed.remove(i)
print '------------------------------------------------------------------------------'
grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))
date_list2download = [str(re.findall('\d{8}', i)[0]) for i in grib_file2download]
print 'number of grib files to download: %d' % len(date_list2download)
print '------------------------------------------------------------------------------\n'
## Download grib file using PyAPS
if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)
elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)
elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)
# return the full file list so it stays index-aligned with date_list downstream
return grib_file_list
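# Illustrative call (dates and directory are placeholders):
#   grib_files = dload_grib(['20150503', '20150527'], '12', grib_source='ECMWF', weather_dir='./WEATHER')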
###############################################################
EXAMPLE='''example:
tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5
tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER
tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23
tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5
tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download
tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download
'''
REFERENCE='''reference:
Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric
phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,
doi:10.1029/2011GL048757
'''
TEMPLATE='''
## 7. Tropospheric Delay Correction (optional and recommended)
## correct tropospheric delay using the following methods:
## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)
## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)
## c. base_trop_cor - (not recommended) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)
pysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps
pysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method
pysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method
pysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram
'''
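# For instance, a template switching to the height_correlation method could read (values illustrative):
#   pysar.troposphericDelay.method    = height_correlation
#   pysar.troposphericDelay.polyOrder = 2
#   pysar.troposphericDelay.looks     = 8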
DATA_INFO='''
re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis
------------------------------------------------------------------------------------------------------------
ERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var
MERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var
To download MERRA2, you need an Earthdata account, and pre-authorize the "NASA GESDISC DATA ARCHIVE" application, following https://disc.gsfc.nasa.gov/earthdata-login.
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\n'+\
' PyAPS is used to download and calculate the delay for each time-series epoch.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=REFERENCE+'\n'+DATA_INFO+'\n'+EXAMPLE)
parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')
parser.add_argument('-d','--dem', dest='dem_file',\
help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')
parser.add_argument('-i', dest='inc_angle', default='30',\
help='a file containing all incidence angles, or a single number used for the whole image.')
parser.add_argument('--weather-dir', dest='weather_dir', \
help='directory to put downloaded weather data, i.e. ./../WEATHER\n'+\
'use directory of input timeseries_file if not specified.')
parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\
help='Delay type to calculate, comb contains both wet and dry delays')
parser.add_argument('--download', action='store_true', help='Download weather data only.')
parser.add_argument('--date-list', dest='date_list_file',\
help='Read the first column of the text file as the list of dates to download,\n'+\
'in YYYYMMDD or YYMMDD format')
parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')
parser.add_argument('-s', dest='weather_model',\
default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\
help='source of the atmospheric data.\n'+\
'As of 2018-Mar-06, the ERA and ECMWF data download links are working.\n'+\
'NARR works for 1979-Jan to 2014-Oct.\n'+\
'MERRA(2) is not working.')
parser.add_argument('--hour', help='time of data in HH, e.g. 12, 06')
parser.add_argument('--template', dest='template_file',\
help='template file with input options below:\n'+TEMPLATE)
parser.add_argument('-o', dest='out_file', help='Output file name for tropospheric corrected timeseries.')
inps = parser.parse_args()
# Require one mode: calculate delay (DEM plus timeseries/date list) or download data only
if not inps.download and not inps.dem_file and ( not inps.timeseries_file or not inps.date_list_file ):
parser.print_help()
sys.exit(1)
return inps
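# For example, `tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5` parses into
# inps.timeseries_file='timeseries.h5', inps.dem_file='geometryRadar.h5', inps.inc_angle='geometryRadar.h5',
# with the remaining options left at their defaults (weather_model='ECMWF', delay_type='comb', download=False).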
###############################################################
def main(argv):
inps = cmdLineParse()
k = None
atr = dict()
if inps.timeseries_file:
inps.timeseries_file = ut.get_file_list([inps.timeseries_file])[0]
atr = readfile.read_attribute(inps.timeseries_file)
k = atr['FILE_TYPE']
elif inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
atr = readfile.read_attribute(inps.dem_file)
if 'ref_y' not in atr.keys() and inps.ref_yx:
print 'No reference info found in input file, use input ref_yx: '+str(inps.ref_yx)
atr['ref_y'] = inps.ref_yx[0]
atr['ref_x'] = inps.ref_yx[1]
##Read Incidence angle: to map the zenith delay to the slant delay
if os.path.isfile(inps.inc_angle):
inps.inc_angle = readfile.read(inps.inc_angle, epoch='incidenceAngle')[0]
else:
inps.inc_angle = float(inps.inc_angle)
print 'incidence angle: '+str(inps.inc_angle)
inps.inc_angle = inps.inc_angle*np.pi/180.0
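# The angle is kept in radians here; get_delay() later maps the zenith delay to the
# slant (line-of-sight) delay by dividing by its cosine:
#   slant_delay = zenith_delay / cos(inc_angle)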
##Prepare DEM file in ROI_PAC format for PyAPS to read
if inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
if os.path.splitext(inps.dem_file)[1] in ['.h5']:
print 'convert DEM file to ROIPAC format'
dem, atr_dem = readfile.read(inps.dem_file, epoch='height')
if 'Y_FIRST' in atr.keys():
atr_dem['FILE_TYPE'] = '.dem'
else:
atr_dem['FILE_TYPE'] = '.hgt'
outname = os.path.splitext(inps.dem_file)[0]+'4pyaps'+atr_dem['FILE_TYPE']
inps.dem_file = writefile.write(dem, atr_dem, outname)
print '*******************************************************************************'
print 'Downloading weather model data ...'
## Get Grib Source
if inps.weather_model in ['ECMWF','ERA-Interim']: inps.grib_source = 'ECMWF'
elif inps.weather_model == 'ERA' : inps.grib_source = 'ERA'
elif inps.weather_model == 'MERRA': inps.grib_source = 'MERRA'
elif inps.weather_model == 'NARR' : inps.grib_source = 'NARR'
else: raise Exception('Unrecognized weather model: '+inps.weather_model)
print 'grib source: '+inps.grib_source
# Get weather directory
if not inps.weather_dir:
if inps.timeseries_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.timeseries_file))+'/../WEATHER'
elif inps.dem_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.dem_file))+'/../WEATHER'
else:
inps.weather_dir = os.path.abspath(os.getcwd())
print 'Store weather data into directory: '+inps.weather_dir
# Get date list to download
if not inps.date_list_file:
print 'read date list info from: '+inps.timeseries_file
h5 = h5py.File(inps.timeseries_file, 'r')
if 'timeseries' in h5.keys():
date_list = sorted(h5[k].keys())
elif k in ['interferograms','coherence','wrapped']:
ifgram_list = sorted(h5[k].keys())
date12_list = ptime.list_ifgram2date12(ifgram_list)
m_dates = [i.split('-')[0] for i in date12_list]
s_dates = [i.split('-')[1] for i in date12_list]
date_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))
else:
raise ValueError('Unsupported input file type: '+k)
h5.close()
else:
date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list_file, dtype=str, usecols=(0,)).tolist())
print 'read date list info from: '+inps.date_list_file
# Get Acquisition time - hour
if not inps.hour:
inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.grib_source)
print 'Time of closest available product: '+inps.hour
## Download data using PyAPS
inps.grib_file_list = dload_grib(date_list, inps.hour, inps.weather_model, inps.weather_dir)
if inps.download:
print 'Download completed, exit as planned.'
return
print '*******************************************************************************'
print 'Calculating delay for each epoch.'
## Calculate tropo delay using pyaps
length = int(atr['FILE_LENGTH'])
width = int(atr['WIDTH'])
date_num = len(date_list)
trop_ts = np.zeros((date_num, length, width), np.float32)
for i in range(date_num):
grib_file = inps.grib_file_list[i]
date = date_list[i]
print 'calculate phase delay on %s from file %s' % (date, os.path.basename(grib_file))
trop_ts[i] = get_delay(grib_file, atr, vars(inps))
## Convert relative phase delay on reference date
try: ref_date = atr['ref_date']
except: ref_date = date_list[0]
print 'convert to relative phase delay with reference date: '+ref_date
ref_idx = date_list.index(ref_date)
trop_ts -= np.tile(trop_ts[ref_idx,:,:], (date_num, 1, 1))
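# After this subtraction trop_ts[ref_idx] is identically zero and every other epoch holds
# the delay difference with respect to the reference acquisition.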
## Write tropospheric delay to HDF5
tropFile = inps.grib_source+'.h5'
print 'writing >>> %s' % (tropFile)
h5trop = h5py.File(tropFile, 'w')
group_trop = h5trop.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
group_trop.create_dataset(date, data=trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
# Write Attributes
for key,value in atr.iteritems():
group_trop.attrs[key] = value
h5trop.close()
## Write corrected Time series to HDF5
if k == 'timeseries':
if not inps.out_file:
inps.out_file = os.path.splitext(inps.timeseries_file)[0]+'_'+inps.grib_source+'.h5'
print 'writing >>> %s' % (inps.out_file)
h5ts = h5py.File(inps.timeseries_file, 'r')
h5tsCor = h5py.File(inps.out_file, 'w')
group_tsCor = h5tsCor.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
ts = h5ts['timeseries'].get(date)[:]
group_tsCor.create_dataset(date, data=ts-trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
h5ts.close()
# Write Attributes
for key,value in atr.iteritems():
group_tsCor.attrs[key] = value
h5tsCor.close()
# Delete temporary DEM file in ROI_PAC format
if inps.dem_file and '4pyaps' in inps.dem_file:
rmCmd = 'rm %s %s.rsc' % (inps.dem_file, inps.dem_file)
print rmCmd
os.system(rmCmd)
print 'Done.'
return inps.out_file
###############################################################
if __name__ == '__main__':
main(sys.argv[1:])
| normal |
{
"blob_id": "9515dcdfc0ece1a6740d6e7075bbcd1c20977590",
"index": 9157,
"step-1": "#! /usr/bin/env python2\n############################################################\n# Program is part of PySAR v1.2 #\n# Copyright(c) 2015, Heresh Fattahi, Zhang Yunjun #\n# Author: Heresh Fattahi, Zhang Yunjun #\n############################################################\n\n\nimport os\nimport sys\nimport argparse\nimport re\n\ntry:\n import pyaps as pa\nexcept:\n sys.exit('Cannot import pyaps into Python!')\n\nimport h5py\nimport numpy as np\n\nimport pysar._datetime as ptime\nimport pysar._pysar_utilities as ut\nimport pysar._readfile as readfile\nimport pysar._writefile as writefile\n\n\n###############################################################\ndef get_delay(grib_file, atr, inps_dict):\n '''Get delay matrix using PyAPS for one acquisition\n Inputs:\n grib_file - strng, grib file path\n atr - dict, including the following attributes:\n dem_file - string, DEM file path\n grib_source - string, Weather re-analysis data source\n delay_type - string, comb/dry/wet\n ref_y/x - string, reference pixel row/col number\n inc_angle - np.array, 0/1/2 D\n Output:\n phs - 2D np.array, absolute tropospheric phase delay relative to ref_y/x\n '''\n if 'X_FIRST' in atr.keys():\n aps = pa.PyAPS_geo(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\\\n verb=True, Del=inps_dict['delay_type'])\n else:\n aps = pa.PyAPS_rdr(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\\\n verb=True, Del=inps_dict['delay_type'])\n phs = np.zeros((aps.ny, aps.nx), dtype=np.float32)\n aps.getdelay(phs, inc=0.0)\n\n # Get relative phase delay in space\n yref = int(atr['ref_y'])\n xref = int(atr['ref_x'])\n phs -= phs[yref, xref]\n\n # project into LOS direction\n phs /= np.cos(inps_dict['inc_angle'])\n \n # reverse the sign for consistency between different phase correction steps/methods\n phs *= -1\n \n return phs\n\n\ndef date_list2grib_file(date_list, hour, grib_source, grib_dir):\n grib_file_list = []\n for d in date_list:\n grib_file = grib_dir+'/'\n if grib_source == 'ECMWF' : grib_file += 'ERA-Int_%s_%s.grb' % (d, hour)\n elif grib_source == 'ERA' : grib_file += 'ERA_%s_%s.grb' % (d, hour)\n elif grib_source == 'NARR' : grib_file += 'narr-a_221_%s_%s00_000.grb' % (d, hour)\n elif grib_source == 'MERRA' : grib_file += 'merra-%s-%s.nc4' % (d, hour)\n elif grib_source == 'MERRA1': grib_file += 'merra-%s-%s.hdf' % (d, hour)\n grib_file_list.append(grib_file)\n return grib_file_list\n\n\ndef dload_grib(date_list, hour, grib_source='ECMWF', weather_dir='./'):\n '''Download weather re-analysis grib files using PyAPS\n Inputs:\n date_list : list of string in YYYYMMDD format\n hour : string in HH:MM or HH format\n grib_source : string, \n weather_dir : string,\n Output:\n grib_file_list : list of string\n '''\n ## Grib data directory\n weather_dir = os.path.abspath(weather_dir)\n grib_dir = weather_dir+'/'+grib_source\n if not os.path.isdir(grib_dir):\n print 'making directory: '+grib_dir\n os.makedirs(grib_dir)\n\n ## Date list to grib file list\n grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)\n\n ## Get date list to download (skip already downloaded files)\n grib_file_existed = ut.get_file_list(grib_file_list)\n if grib_file_existed:\n grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])\n grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])\n grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\\\n 
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]\n print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)\n print 'number of grib files existed : %d' % len(grib_file_existed)\n if grib_file_corrupted:\n print '------------------------------------------------------------------------------'\n print 'corrupted grib files detected! Delete them and re-download...'\n print 'number of grib files corrupted : %d' % len(grib_file_corrupted)\n for i in grib_file_corrupted:\n rmCmd = 'rm '+i\n print rmCmd\n os.system(rmCmd)\n grib_file_existed.remove(i)\n print '------------------------------------------------------------------------------'\n grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))\n date_list2download = [str(re.findall('\\d{8}', i)[0]) for i in grib_file2download]\n print 'number of grib files to download: %d' % len(date_list2download)\n print '------------------------------------------------------------------------------\\n'\n\n ## Download grib file using PyAPS\n if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)\n elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)\n elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)\n elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)\n elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)\n\n return grib_file_existed\n\n\n###############################################################\nEXAMPLE='''example:\n tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5\n tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER\n tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt\n\n tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR\n tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23\n tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5\n\n tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download\n tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download\n'''\n\nREFERENCE='''reference:\n Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric\n phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,\n doi:10.1029/2011GL048757\n'''\n\nTEMPLATE='''\n## 7. Tropospheric Delay Correction (optional and recommended)\n## correct tropospheric delay using the following methods:\n## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)\n## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)\n## c. 
base_trop_cor - (not recommend) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)\npysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps\npysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method\npysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method\npysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram \n'''\n\nDATA_INFO='''\n re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis\n------------------------------------------------------------------------------------------------------------\nERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var\nMERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var\n\nTo download MERRA2, you need an Earthdata account, and pre-authorize the \"NASA GESDISC DATA ARCHIVE\" application, following https://disc.gsfc.nasa.gov/earthdata-login.\n'''\n\n\ndef cmdLineParse():\n parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\\n'+\\\n ' PyAPS is used to download and calculate the delay for each time-series epoch.',\\\n formatter_class=argparse.RawTextHelpFormatter,\\\n epilog=REFERENCE+'\\n'+DATA_INFO+'\\n'+EXAMPLE)\n\n parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')\n parser.add_argument('-d','--dem', dest='dem_file',\\\n help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')\n parser.add_argument('-i', dest='inc_angle', default='30',\\\n help='a file containing all incidence angles, or a number representing for the whole image.')\n parser.add_argument('--weather-dir', dest='weather_dir', \\\n help='directory to put downloaded weather data, i.e. ./../WEATHER\\n'+\\\n 'use directory of input timeseries_file if not specified.')\n parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\\\n help='Delay type to calculate, comb contains both wet and dry delays')\n parser.add_argument('--download', action='store_true', help='Download weather data only.')\n parser.add_argument('--date-list', dest='date_list_file',\\\n help='Read the first column of text file as list of date to download data\\n'+\\\n 'in YYYYMMDD or YYMMDD format')\n parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')\n\n parser.add_argument('-s', dest='weather_model',\\\n default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\\\n help='source of the atmospheric data.\\n'+\\\n 'By the time of 2018-Mar-06, ERA and ECMWF data download link is working.\\n'+\\\n 'NARR is working for 1979-Jan to 2014-Oct.\\n'+\\\n 'MERRA(2) is not working.')\n parser.add_argument('--hour', help='time of data in HH, e.g. 
12, 06')\n\n parser.add_argument('--template', dest='template_file',\\\n help='template file with input options below:\\n'+TEMPLATE)\n parser.add_argument('-o', dest='out_file', help='Output file name for trospheric corrected timeseries.')\n\n inps = parser.parse_args()\n\n # Calculate DELAY or DOWNLOAD DATA ONLY, required one of them\n if not inps.download and not inps.dem_file and ( not inps.timeseries_file or not inps.date_list_file ):\n parser.print_help()\n sys.exit(1)\n return inps\n\n\n###############################################################\ndef main(argv):\n inps = cmdLineParse()\n\n k = None\n atr = dict()\n if inps.timeseries_file:\n inps.timeseries_file = ut.get_file_list([inps.timeseries_file])[0]\n atr = readfile.read_attribute(inps.timeseries_file)\n k = atr['FILE_TYPE']\n elif inps.dem_file:\n inps.dem_file = ut.get_file_list([inps.dem_file])[0]\n atr = readfile.read_attribute(inps.dem_file)\n if 'ref_y' not in atr.keys() and inps.ref_yx:\n print 'No reference info found in input file, use input ref_yx: '+str(inps.ref_yx)\n atr['ref_y'] = inps.ref_yx[0]\n atr['ref_x'] = inps.ref_yx[1]\n\n ##Read Incidence angle: to map the zenith delay to the slant delay\n if os.path.isfile(inps.inc_angle):\n inps.inc_angle = readfile.read(inps.inc_angle, epoch='incidenceAngle')[0]\n else:\n inps.inc_angle = float(inps.inc_angle)\n print 'incidence angle: '+str(inps.inc_angle)\n inps.inc_angle = inps.inc_angle*np.pi/180.0\n\n ##Prepare DEM file in ROI_PAC format for PyAPS to read\n if inps.dem_file:\n inps.dem_file = ut.get_file_list([inps.dem_file])[0]\n if os.path.splitext(inps.dem_file)[1] in ['.h5']:\n print 'convert DEM file to ROIPAC format'\n dem, atr_dem = readfile.read(inps.dem_file, epoch='height')\n if 'Y_FIRST' in atr.keys():\n atr_dem['FILE_TYPE'] = '.dem'\n else:\n atr_dem['FILE_TYPE'] = '.hgt'\n outname = os.path.splitext(inps.dem_file)[0]+'4pyaps'+atr_dem['FILE_TYPE']\n inps.dem_file = writefile.write(dem, atr_dem, outname)\n\n print '*******************************************************************************'\n print 'Downloading weather model data ...'\n\n ## Get Grib Source\n if inps.weather_model in ['ECMWF','ERA-Interim']: inps.grib_source = 'ECMWF'\n elif inps.weather_model == 'ERA' : inps.grib_source = 'ERA'\n elif inps.weather_model == 'MERRA': inps.grib_source = 'MERRA'\n elif inps.weather_model == 'NARR' : inps.grib_source = 'NARR'\n else: raise Reception('Unrecognized weather model: '+inps.weather_model)\n print 'grib source: '+inps.grib_source\n\n # Get weather directory\n if not inps.weather_dir:\n if inps.timeseries_file:\n inps.weather_dir = os.path.dirname(os.path.abspath(inps.timeseries_file))+'/../WEATHER'\n elif inps.dem_file:\n inps.weather_dir = os.path.dirname(os.path.abspath(inps.dem_file))+'/../WEATHER'\n else:\n inps.weather_dir = os.path.abspath(os.getcwd())\n print 'Store weather data into directory: '+inps.weather_dir\n\n # Get date list to download\n if not inps.date_list_file:\n print 'read date list info from: '+inps.timeseries_file\n h5 = h5py.File(inps.timeseries_file, 'r')\n if 'timeseries' in h5.keys():\n date_list = sorted(h5[k].keys())\n elif k in ['interferograms','coherence','wrapped']:\n ifgram_list = sorted(h5[k].keys())\n date12_list = ptime.list_ifgram2date12(ifgram_list)\n m_dates = [i.split('-')[0] for i in date12_list]\n s_dates = [i.split('-')[1] for i in date12_list]\n date_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))\n else:\n raise ValueError('Un-support input file type:'+k)\n h5.close()\n else:\n 
date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list_file, dtype=str, usecols=(0,)).tolist())\n print 'read date list info from: '+inps.date_list_file\n\n # Get Acquisition time - hour\n if not inps.hour:\n inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.grib_source)\n print 'Time of cloest available product: '+inps.hour\n\n ## Download data using PyAPS\n inps.grib_file_list = dload_grib(date_list, inps.hour, inps.weather_model, inps.weather_dir)\n\n if inps.download:\n print 'Download completed, exit as planned.'\n return\n\n print '*******************************************************************************'\n print 'Calcualting delay for each epoch.'\n\n ## Calculate tropo delay using pyaps\n length = int(atr['FILE_LENGTH'])\n width = int(atr['WIDTH'])\n date_num = len(date_list)\n trop_ts = np.zeros((date_num, length, width), np.float32)\n for i in range(date_num):\n grib_file = inps.grib_file_list[i] \n date = date_list[i]\n print 'calculate phase delay on %s from file %s' % (date, os.path.basename(grib_file))\n trop_ts[i] = get_delay(grib_file, atr, vars(inps))\n\n ## Convert relative phase delay on reference date\n try: ref_date = atr['ref_date']\n except: ref_date = date_list[0]\n print 'convert to relative phase delay with reference date: '+ref_date\n ref_idx = date_list.index(ref_date)\n trop_ts -= np.tile(trop_ts[ref_idx,:,:], (date_num, 1, 1))\n\n ## Write tropospheric delay to HDF5\n tropFile = inps.grib_source+'.h5'\n print 'writing >>> %s' % (tropFile)\n h5trop = h5py.File(tropFile, 'w')\n group_trop = h5trop.create_group('timeseries')\n print 'number of acquisitions: '+str(date_num)\n prog_bar = ptime.progress_bar(maxValue=date_num)\n for i in range(date_num):\n date = date_list[i]\n group_trop.create_dataset(date, data=trop_ts[i], compression='gzip')\n prog_bar.update(i+1, suffix=date)\n prog_bar.close()\n # Write Attributes\n for key,value in atr.iteritems():\n group_trop.attrs[key] = value\n h5trop.close()\n\n ## Write corrected Time series to HDF5\n if k == 'timeseries':\n if not inps.out_file:\n inps.out_file = os.path.splitext(inps.timeseries_file)[0]+'_'+inps.grib_source+'.h5'\n print 'writing >>> %s' % (inps.out_file)\n h5ts = h5py.File(inps.timeseries_file, 'r')\n h5tsCor = h5py.File(inps.out_file, 'w') \n group_tsCor = h5tsCor.create_group('timeseries')\n print 'number of acquisitions: '+str(date_num)\n prog_bar = ptime.progress_bar(maxValue=date_num)\n for i in range(date_num):\n date = date_list[i]\n ts = h5ts['timeseries'].get(date)[:]\n group_tsCor.create_dataset(date, data=ts-trop_ts[i], compression='gzip')\n prog_bar.update(i+1, suffix=date)\n prog_bar.close()\n h5ts.close()\n # Write Attributes\n for key,value in atr.iteritems():\n group_tsCor.attrs[key] = value\n h5tsCor.close()\n\n # Delete temporary DEM file in ROI_PAC format\n if '4pyaps' in inps.dem_file:\n rmCmd = 'rm %s %s.rsc' % (inps.dem_file, inps.dem_file)\n print rmCmd\n os.system(rmCmd)\n print 'Done.'\n return inps.out_file\n\n\n###############################################################\nif __name__ == '__main__':\n main(sys.argv[1:])\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
| [ 0 ] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import JsonResponse
from knowdb.models import Knowledge
import random
# Create your views here.
def answer(request):
ret = {}
data = Knowledge.objects.all()
num = random.choice(range(1,int(data.count())+1))
ret['name'] = data[num-1].name
ret['answer'] = data[num-1].answer
print ret
return JsonResponse({'exec':'true','ret':ret})
def edit(request):
name = request.POST.get('name')
answer = request.POST.get('answer')
print name,answer
try:
adddata = Knowledge(name=name,answer=answer)
adddata.save()
return JsonResponse({'exec':'true','ret':'提交成功'})  # '提交成功' = 'submission succeeded'
except Exception as e:
return JsonResponse({'exec':'false','ret':'提交失败'})  # '提交失败' = 'submission failed'
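# A hypothetical urls.py wiring for these views (sketch only; the URL patterns are assumptions):
#   from django.conf.urls import url
#   from knowdb import views
#   urlpatterns = [
#       url(r'^answer/$', views.answer),
#       url(r'^edit/$', views.edit),
#   ]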
| normal |
{
"blob_id": "eb558644283d992af2c324d457dbe674b714235f",
"index": 735,
"step-1": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom knowdb.models import Knowledge\n\nimport random\n# Create your views here.\n\ndef answer(request):\n ret = {}\n data = Knowledge.objects.all()\n num = random.choice(range(1,int(data.count())+1))\n ret['name'] = data[num-1].name\n ret['answer'] = data[num-1].answer\n print ret\n return JsonResponse({'exec':'true','ret':ret})\n\n\n\ndef edit(request):\n name = request.POST.get('name')\n answer = request.POST.get('answer')\n print name,answer\n try:\n adddata = Knowledge(name=name,answer=answer)\n adddata.save()\n return JsonResponse({'exec':'true','ret':'提交成功'})\n except Exception as e:\n return JsonResponse({'exec':'false','ret':'提交失败'})\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
| [ 0 ] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# Copyright © YXC
# CreateTime: 2016-03-09 10:06:02
"""
Example of functions with an arbitrary number of arguments
"""
def optional_argument_func(arg1='', arg2=''):
"""
Function with two optional arguments
"""
print("arg1:{0}".format(arg1))
print("arg2:{0}".format(arg2))
def arbitrary_argument_func(*args):
"""
just use "*" to collect all remaining arguments into a tuple
"""
numargs = len(args)
print("Number of arguments:{0}".format(numargs))
for i, arg in enumerate(args):
print("Argument {0} is : {1}".format(i, arg))
if __name__ == "__main__":
optional_argument_func("Hello", "World")
arbitrary_argument_func()
arbitrary_argument_func("hello")
arbitrary_argument_func("hello", "world", "again")
| normal |
{
"blob_id": "061a78650e2abf6a9d1e4796dd349174a8df5cb8",
"index": 8747,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print('arg1:{0}'.format(arg1))\n print('arg2:{0}'.format(arg2))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print('arg1:{0}'.format(arg1))\n print('arg2:{0}'.format(arg2))\n\n\ndef arbitrary_argument_func(*args):\n \"\"\"\n just use \"*\" to collect all remaining arguments into a tuple\n \"\"\"\n numargs = len(args)\n print('Number of arguments:{0}'.format(numargs))\n for i, arg in enumerate(args):\n print('Argument {0} is : {1}'.format(i, arg))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print('arg1:{0}'.format(arg1))\n print('arg2:{0}'.format(arg2))\n\n\ndef arbitrary_argument_func(*args):\n \"\"\"\n just use \"*\" to collect all remaining arguments into a tuple\n \"\"\"\n numargs = len(args)\n print('Number of arguments:{0}'.format(numargs))\n for i, arg in enumerate(args):\n print('Argument {0} is : {1}'.format(i, arg))\n\n\nif __name__ == '__main__':\n optional_argument_func('Hello', 'World')\n arbitrary_argument_func()\n arbitrary_argument_func('hello')\n arbitrary_argument_func('hello', 'world', 'again')\n",
"step-5": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n# Copyright © YXC\n# CreateTime: 2016-03-09 10:06:02\n\n\"\"\"\nExample of functions with arbitrary number arguments\n\"\"\"\n\n\ndef optional_argument_func(arg1='', arg2=''):\n \"\"\"\n Function with two optional arguments\n \"\"\"\n print(\"arg1:{0}\".format(arg1))\n print(\"arg2:{0}\".format(arg2))\n\n\ndef arbitrary_argument_func(*args):\n \"\"\"\n just use \"*\" to collect all remaining arguments into a tuple\n \"\"\"\n numargs = len(args)\n print(\"Number of arguments:{0}\".format(numargs))\n for i, arg in enumerate(args):\n print(\"Argument {0} is : {1}\".format(i, arg))\n\n\nif __name__ == \"__main__\":\n optional_argument_func(\"Hello\", \"World\")\n arbitrary_argument_func()\n arbitrary_argument_func(\"hello\")\n arbitrary_argument_func(\"hello\", \"world\", \"again\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
| [ 0, 1, 2, 3, 4 ] |
import unittest
import sys
import os
#Add project root to path
sys.path.append('../..')
from speckle.SpeckleClient import SpeckleApiClient
class TestSpeckleStream(unittest.TestCase):
def setUp(self):
self.s = SpeckleApiClient()
self.user = {'email':'[email protected]','password':'testpassword', 'username':'testuser'}
self.test_stream = 'RKWgU-oWF'
self.test_object = '5bcf2c7e3ff66c15abac431d'
login = self.s.UserLoginAsync(self.user)
assert login, 'Test User Login was not successful'
self.user['id'] = login['resource']['_id']
self.stream = self.s.StreamGetAsync(self.test_stream)
obj = self.s.StreamGetObjectsAsync(self.test_stream)
#for o in obj['resources']:
# r = self.s.ObjectDeleteAsync(o['_id'])
self.s.StreamUpdateAsync(self.test_stream, self.stream)
def tearDown(self):
self.s.StreamUpdateAsync(self.test_stream, self.stream)
def none_msg(self, header):
return header + ' responded with None'
def test_get_object(self):
r = self.s.ObjectGetAsync(self.test_object)
self.assertIsNotNone(r, self.none_msg('ObjectGetAsync'))
self.assertTrue(r['success'])
def test_create_object(self):
r = self.s.ObjectCreateAsync([{"owner": self.user['username']}])
self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))
self.assertTrue(r['success'])
self.assertTrue(r['resources'])
#Check created object ID is in response
resource = r['resources'][0]
self.assertTrue(resource['_id'])
print(resource['_id'])
self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])
def test_create_point_object(self):
obj = {
"owner": self.user['username'],
"type": "Point",
"hash": "hash",
"value": [0,0,0]
}
r = self.s.ObjectCreateAsync([obj])
self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))
self.assertTrue(r['success'])
self.assertTrue(r['resources'])
#Check created object ID is in response
resource = r['resources'][0]
self.assertTrue(resource['_id'])
print(resource['_id'])
self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])
def test_create_mesh_object(self):
obj = {
"owner": self.user['username'],
"type": "Mesh",
"geometryHash": "Mesh.66ec936fc8eb1844581db685e5672f79",
"hash": "2e4d67853709316f17e3745cd700a9ed",
"properties": {
"center": {
"type": "Point",
"value": [
-2.326136578802356,
7.41377889150433,
0.01525474415516414
],
"hash": "318e1a3b9bf16bf5711170b61b4cd144",
"geometryHash": "Point.8012f72d1fd49795101ab099b7dff3cb"
},
"area": 1.6718884716988291,
"revitFamTYpe": "undefined"
},
"vertices": [
-2.6709675788879395,
7.420193672180176,
0.007017634343355894,
-2.6617817878723145,
7.910780906677246,
0.016628438606858253,
-2.6525962352752686,
8.401368141174316,
0.026239242404699326,
-2.6434104442596436,
8.891955375671387,
0.03585004433989525,
-2.6342246532440186,
9.382542610168457,
0.04546085000038147,
-2.507732629776001,
6.9263834953308105,
0.005644594319164753,
-2.498547077178955,
7.416970729827881,
0.01319583784788847,
-2.48936128616333,
7.907557964324951,
0.02074708230793476,
-2.480175495147705,
8.39814567565918,
0.028298325836658478,
-2.47098970413208,
8.88873291015625,
0.035849571228027344,
-2.3444979190826416,
6.432573318481445,
0.004271554294973612,
-2.3353121280670166,
6.923160552978516,
0.00976323802024126,
-2.3261263370513916,
7.413747787475586,
0.015254922211170197,
-2.3169405460357666,
7.9043354988098145,
0.020746605470776558,
-2.3077549934387207,
8.394922256469727,
0.02623829059302807,
-2.181262969970703,
5.93876314163208,
0.0028985145036131144,
-2.172077178955078,
6.42935037612915,
0.006330638192594051,
-2.162891387939453,
6.919937610626221,
0.009762762114405632,
-2.1537058353424072,
7.410524845123291,
0.013194886036217213,
-2.1445200443267822,
7.9011125564575195,
0.016627009958028793,
-2.0180280208587646,
5.444952964782715,
0.0015254743630066514,
-2.0088422298431396,
5.935540199279785,
0.002898038364946842,
-1.9996565580368042,
6.4261274337768555,
0.0042706020176410675,
-1.9904708862304688,
6.916714668273926,
0.00564316613599658,
-1.9812850952148438,
7.407302379608154,
0.0070157297886908054
],
"faces": [
1,
6,
1,
0,
5,
1,
7,
2,
1,
6,
1,
8,
3,
2,
7,
1,
9,
4,
3,
8,
1,
11,
6,
5,
10,
1,
12,
7,
6,
11,
1,
13,
8,
7,
12,
1,
14,
9,
8,
13,
1,
16,
11,
10,
15,
1,
17,
12,
11,
16,
1,
18,
13,
12,
17,
1,
19,
14,
13,
18,
1,
21,
16,
15,
20,
1,
22,
17,
16,
21,
1,
23,
18,
17,
22,
1,
24,
19,
18,
23
]
}
r = self.s.ObjectCreateAsync([obj])
self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))
self.assertTrue(r['success'])
self.assertTrue(r['resources'])
# Check created object ID is in response
resource = r['resources'][0]
self.assertTrue(resource['_id'])
print(resource['_id'])
self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])
def test_line_object(self):
obj = {
"type": "Line",
"value": [
-5689.317811503128,
-13716.87365524665,
3448.9999880790538,
-5688.317811503128,
-13717.87365524665,
3539.9999880790538
],
}
r = self.s.ObjectCreateAsync([obj])
self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))
self.assertTrue(r['success'])
self.assertTrue(r['resources'])
# Check created object ID is in response
resource = r['resources'][0]
self.assertTrue(resource['_id'])
print(resource['_id'])
self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])
def test_line_objects(self):
objects = [
{
"type": "Line",
"value": [
0,
0,
0,
1,
1,
1
],
},
{
"type": "Line",
"value": [
-1,
-1,
-1,
2,
2,
2
],
},
]
r = self.s.ObjectCreateAsync(objects)
self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))
self.assertTrue(r['success'])
self.assertTrue(r['resources'])
# Check created object ID is in response
resource = r['resources'][0]
self.assertTrue(resource['_id'])
print(resource['_id'])
self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])
def test_update_object(self):
geometry = {
"vertices": [0.0, 1.0, 2.0, 3.0],
"faces": [1,2,3]
}
props = {
'type': 'RCSlab',
'material': 'Concrete'
}
data = {'properties': props}
data.update(geometry)
r = self.s.ObjectUpdateAsync(self.test_object, data)
self.assertIsNotNone(r)
#Todo: Look into why user is not authorized to update
self.assertTrue(r['success'])
if __name__ == "__main__":
unittest.main()
| normal |
{
"blob_id": "b39403171ed264c8fae5ea4ae9d17f77cfcab497",
"index": 9122,
"step-1": "<mask token>\n\n\nclass TestSpeckleStream(unittest.TestCase):\n\n def setUp(self):\n self.s = SpeckleApiClient()\n self.user = {'email': '[email protected]', 'password':\n 'testpassword', 'username': 'testuser'}\n self.test_stream = 'RKWgU-oWF'\n self.test_object = '5bcf2c7e3ff66c15abac431d'\n login = self.s.UserLoginAsync(self.user)\n assert login, 'Test User Login was not successful'\n self.user['id'] = login['resource']['_id']\n self.stream = self.s.StreamGetAsync(self.test_stream)\n obj = self.s.StreamGetObjectsAsync(self.test_stream)\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def tearDown(self):\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def none_msg(self, header):\n return header + ' responded with None'\n\n def test_get_object(self):\n r = self.s.ObjectGetAsync(self.test_object)\n self.assertIsNotNone(r, self.none_msg('ObjectGetAsync'))\n self.assertTrue(r['success'])\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_line_objects(self):\n objects = [{'type': 'Line', 'value': [0, 0, 0, 1, 1, 1]}, {'type':\n 'Line', 'value': [-1, -1, -1, 2, 2, 2]}]\n r = self.s.ObjectCreateAsync(objects)\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_update_object(self):\n geometry = {'vertices': [0.0, 1.0, 2.0, 3.0], 'faces': [1, 2, 3]}\n props = {'type': 'RCSlab', 'material': 'Concrete'}\n data = {'properties': props}\n data.update(geometry)\n r = self.s.ObjectUpdateAsync(self.test_object, data)\n self.assertIsNotNone(r)\n self.assertTrue(r['success'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSpeckleStream(unittest.TestCase):\n\n def setUp(self):\n self.s = SpeckleApiClient()\n self.user = {'email': '[email protected]', 'password':\n 'testpassword', 'username': 'testuser'}\n self.test_stream = 'RKWgU-oWF'\n self.test_object = '5bcf2c7e3ff66c15abac431d'\n login = self.s.UserLoginAsync(self.user)\n assert login, 'Test User Login was not successful'\n self.user['id'] = login['resource']['_id']\n self.stream = self.s.StreamGetAsync(self.test_stream)\n obj = self.s.StreamGetObjectsAsync(self.test_stream)\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def tearDown(self):\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def none_msg(self, header):\n return header + ' responded with None'\n\n def test_get_object(self):\n r = self.s.ObjectGetAsync(self.test_object)\n self.assertIsNotNone(r, self.none_msg('ObjectGetAsync'))\n self.assertTrue(r['success'])\n\n def test_create_object(self):\n r = self.s.ObjectCreateAsync([{'owner': self.user['username']}])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_create_point_object(self):\n obj = {'owner': self.user['username'], 'type': 'Point', 'hash':\n 'hash', 'value': [0, 0, 0]}\n r = self.s.ObjectCreateAsync([obj])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_create_mesh_object(self):\n obj = {'owner': self.user['username'], 'type': 'Mesh',\n 'geometryHash': 'Mesh.66ec936fc8eb1844581db685e5672f79', 'hash':\n '2e4d67853709316f17e3745cd700a9ed', 'properties': {'center': {\n 'type': 'Point', 'value': [-2.326136578802356, 7.41377889150433,\n 0.01525474415516414], 'hash':\n '318e1a3b9bf16bf5711170b61b4cd144', 'geometryHash':\n 'Point.8012f72d1fd49795101ab099b7dff3cb'}, 'area': \n 1.6718884716988291, 'revitFamTYpe': 'undefined'}, 'vertices': [\n -2.6709675788879395, 7.420193672180176, 0.007017634343355894, -\n 2.6617817878723145, 7.910780906677246, 0.016628438606858253, -\n 2.6525962352752686, 8.401368141174316, 0.026239242404699326, -\n 2.6434104442596436, 8.891955375671387, 0.03585004433989525, -\n 2.6342246532440186, 9.382542610168457, 0.04546085000038147, -\n 2.507732629776001, 6.9263834953308105, 0.005644594319164753, -\n 2.498547077178955, 7.416970729827881, 0.01319583784788847, -\n 2.48936128616333, 7.907557964324951, 0.02074708230793476, -\n 2.480175495147705, 8.39814567565918, 0.028298325836658478, -\n 2.47098970413208, 8.88873291015625, 0.035849571228027344, -\n 2.3444979190826416, 6.432573318481445, 0.004271554294973612, -\n 2.3353121280670166, 6.923160552978516, 0.00976323802024126, -\n 2.3261263370513916, 7.413747787475586, 0.015254922211170197, -\n 2.3169405460357666, 7.9043354988098145, 0.020746605470776558, -\n 2.3077549934387207, 8.394922256469727, 0.02623829059302807, -\n 2.181262969970703, 5.93876314163208, 0.0028985145036131144, -\n 2.172077178955078, 6.42935037612915, 0.006330638192594051, -\n 2.162891387939453, 6.919937610626221, 0.009762762114405632, -\n 2.1537058353424072, 7.410524845123291, 0.013194886036217213, -\n 2.1445200443267822, 7.9011125564575195, 
0.016627009958028793, -\n 2.0180280208587646, 5.444952964782715, 0.0015254743630066514, -\n 2.0088422298431396, 5.935540199279785, 0.002898038364946842, -\n 1.9996565580368042, 6.4261274337768555, 0.0042706020176410675, \n -1.9904708862304688, 6.916714668273926, 0.00564316613599658, -\n 1.9812850952148438, 7.407302379608154, 0.0070157297886908054],\n 'faces': [1, 6, 1, 0, 5, 1, 7, 2, 1, 6, 1, 8, 3, 2, 7, 1, 9, 4,\n 3, 8, 1, 11, 6, 5, 10, 1, 12, 7, 6, 11, 1, 13, 8, 7, 12, 1, 14,\n 9, 8, 13, 1, 16, 11, 10, 15, 1, 17, 12, 11, 16, 1, 18, 13, 12, \n 17, 1, 19, 14, 13, 18, 1, 21, 16, 15, 20, 1, 22, 17, 16, 21, 1,\n 23, 18, 17, 22, 1, 24, 19, 18, 23]}\n r = self.s.ObjectCreateAsync([obj])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_line_object(self):\n obj = {'type': 'Line', 'value': [-5689.317811503128, -\n 13716.87365524665, 3448.9999880790538, -5688.317811503128, -\n 13717.87365524665, 3539.9999880790538]}\n r = self.s.ObjectCreateAsync([obj])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_line_objects(self):\n objects = [{'type': 'Line', 'value': [0, 0, 0, 1, 1, 1]}, {'type':\n 'Line', 'value': [-1, -1, -1, 2, 2, 2]}]\n r = self.s.ObjectCreateAsync(objects)\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_update_object(self):\n geometry = {'vertices': [0.0, 1.0, 2.0, 3.0], 'faces': [1, 2, 3]}\n props = {'type': 'RCSlab', 'material': 'Concrete'}\n data = {'properties': props}\n data.update(geometry)\n r = self.s.ObjectUpdateAsync(self.test_object, data)\n self.assertIsNotNone(r)\n self.assertTrue(r['success'])\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../..')\n<mask token>\n\n\nclass TestSpeckleStream(unittest.TestCase):\n\n def setUp(self):\n self.s = SpeckleApiClient()\n self.user = {'email': '[email protected]', 'password':\n 'testpassword', 'username': 'testuser'}\n self.test_stream = 'RKWgU-oWF'\n self.test_object = '5bcf2c7e3ff66c15abac431d'\n login = self.s.UserLoginAsync(self.user)\n assert login, 'Test User Login was not successful'\n self.user['id'] = login['resource']['_id']\n self.stream = self.s.StreamGetAsync(self.test_stream)\n obj = self.s.StreamGetObjectsAsync(self.test_stream)\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def tearDown(self):\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def none_msg(self, header):\n return header + ' responded with None'\n\n def test_get_object(self):\n r = self.s.ObjectGetAsync(self.test_object)\n self.assertIsNotNone(r, self.none_msg('ObjectGetAsync'))\n self.assertTrue(r['success'])\n\n def test_create_object(self):\n r = self.s.ObjectCreateAsync([{'owner': self.user['username']}])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_create_point_object(self):\n obj = {'owner': self.user['username'], 'type': 'Point', 'hash':\n 'hash', 'value': [0, 0, 0]}\n r = self.s.ObjectCreateAsync([obj])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_create_mesh_object(self):\n obj = {'owner': self.user['username'], 'type': 'Mesh',\n 'geometryHash': 'Mesh.66ec936fc8eb1844581db685e5672f79', 'hash':\n '2e4d67853709316f17e3745cd700a9ed', 'properties': {'center': {\n 'type': 'Point', 'value': [-2.326136578802356, 7.41377889150433,\n 0.01525474415516414], 'hash':\n '318e1a3b9bf16bf5711170b61b4cd144', 'geometryHash':\n 'Point.8012f72d1fd49795101ab099b7dff3cb'}, 'area': \n 1.6718884716988291, 'revitFamTYpe': 'undefined'}, 'vertices': [\n -2.6709675788879395, 7.420193672180176, 0.007017634343355894, -\n 2.6617817878723145, 7.910780906677246, 0.016628438606858253, -\n 2.6525962352752686, 8.401368141174316, 0.026239242404699326, -\n 2.6434104442596436, 8.891955375671387, 0.03585004433989525, -\n 2.6342246532440186, 9.382542610168457, 0.04546085000038147, -\n 2.507732629776001, 6.9263834953308105, 0.005644594319164753, -\n 2.498547077178955, 7.416970729827881, 0.01319583784788847, -\n 2.48936128616333, 7.907557964324951, 0.02074708230793476, -\n 2.480175495147705, 8.39814567565918, 0.028298325836658478, -\n 2.47098970413208, 8.88873291015625, 0.035849571228027344, -\n 2.3444979190826416, 6.432573318481445, 0.004271554294973612, -\n 2.3353121280670166, 6.923160552978516, 0.00976323802024126, -\n 2.3261263370513916, 7.413747787475586, 0.015254922211170197, -\n 2.3169405460357666, 7.9043354988098145, 0.020746605470776558, -\n 2.3077549934387207, 8.394922256469727, 0.02623829059302807, -\n 2.181262969970703, 5.93876314163208, 0.0028985145036131144, -\n 2.172077178955078, 6.42935037612915, 0.006330638192594051, -\n 2.162891387939453, 6.919937610626221, 0.009762762114405632, -\n 2.1537058353424072, 7.410524845123291, 0.013194886036217213, -\n 
2.1445200443267822, 7.9011125564575195, 0.016627009958028793, -\n 2.0180280208587646, 5.444952964782715, 0.0015254743630066514, -\n 2.0088422298431396, 5.935540199279785, 0.002898038364946842, -\n 1.9996565580368042, 6.4261274337768555, 0.0042706020176410675, \n -1.9904708862304688, 6.916714668273926, 0.00564316613599658, -\n 1.9812850952148438, 7.407302379608154, 0.0070157297886908054],\n 'faces': [1, 6, 1, 0, 5, 1, 7, 2, 1, 6, 1, 8, 3, 2, 7, 1, 9, 4,\n 3, 8, 1, 11, 6, 5, 10, 1, 12, 7, 6, 11, 1, 13, 8, 7, 12, 1, 14,\n 9, 8, 13, 1, 16, 11, 10, 15, 1, 17, 12, 11, 16, 1, 18, 13, 12, \n 17, 1, 19, 14, 13, 18, 1, 21, 16, 15, 20, 1, 22, 17, 16, 21, 1,\n 23, 18, 17, 22, 1, 24, 19, 18, 23]}\n r = self.s.ObjectCreateAsync([obj])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_line_object(self):\n obj = {'type': 'Line', 'value': [-5689.317811503128, -\n 13716.87365524665, 3448.9999880790538, -5688.317811503128, -\n 13717.87365524665, 3539.9999880790538]}\n r = self.s.ObjectCreateAsync([obj])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_line_objects(self):\n objects = [{'type': 'Line', 'value': [0, 0, 0, 1, 1, 1]}, {'type':\n 'Line', 'value': [-1, -1, -1, 2, 2, 2]}]\n r = self.s.ObjectCreateAsync(objects)\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_update_object(self):\n geometry = {'vertices': [0.0, 1.0, 2.0, 3.0], 'faces': [1, 2, 3]}\n props = {'type': 'RCSlab', 'material': 'Concrete'}\n data = {'properties': props}\n data.update(geometry)\n r = self.s.ObjectUpdateAsync(self.test_object, data)\n self.assertIsNotNone(r)\n self.assertTrue(r['success'])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport sys\nimport os\nsys.path.append('../..')\nfrom speckle.SpeckleClient import SpeckleApiClient\n\n\nclass TestSpeckleStream(unittest.TestCase):\n\n def setUp(self):\n self.s = SpeckleApiClient()\n self.user = {'email': '[email protected]', 'password':\n 'testpassword', 'username': 'testuser'}\n self.test_stream = 'RKWgU-oWF'\n self.test_object = '5bcf2c7e3ff66c15abac431d'\n login = self.s.UserLoginAsync(self.user)\n assert login, 'Test User Login was not successful'\n self.user['id'] = login['resource']['_id']\n self.stream = self.s.StreamGetAsync(self.test_stream)\n obj = self.s.StreamGetObjectsAsync(self.test_stream)\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def tearDown(self):\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def none_msg(self, header):\n return header + ' responded with None'\n\n def test_get_object(self):\n r = self.s.ObjectGetAsync(self.test_object)\n self.assertIsNotNone(r, self.none_msg('ObjectGetAsync'))\n self.assertTrue(r['success'])\n\n def test_create_object(self):\n r = self.s.ObjectCreateAsync([{'owner': self.user['username']}])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_create_point_object(self):\n obj = {'owner': self.user['username'], 'type': 'Point', 'hash':\n 'hash', 'value': [0, 0, 0]}\n r = self.s.ObjectCreateAsync([obj])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_create_mesh_object(self):\n obj = {'owner': self.user['username'], 'type': 'Mesh',\n 'geometryHash': 'Mesh.66ec936fc8eb1844581db685e5672f79', 'hash':\n '2e4d67853709316f17e3745cd700a9ed', 'properties': {'center': {\n 'type': 'Point', 'value': [-2.326136578802356, 7.41377889150433,\n 0.01525474415516414], 'hash':\n '318e1a3b9bf16bf5711170b61b4cd144', 'geometryHash':\n 'Point.8012f72d1fd49795101ab099b7dff3cb'}, 'area': \n 1.6718884716988291, 'revitFamTYpe': 'undefined'}, 'vertices': [\n -2.6709675788879395, 7.420193672180176, 0.007017634343355894, -\n 2.6617817878723145, 7.910780906677246, 0.016628438606858253, -\n 2.6525962352752686, 8.401368141174316, 0.026239242404699326, -\n 2.6434104442596436, 8.891955375671387, 0.03585004433989525, -\n 2.6342246532440186, 9.382542610168457, 0.04546085000038147, -\n 2.507732629776001, 6.9263834953308105, 0.005644594319164753, -\n 2.498547077178955, 7.416970729827881, 0.01319583784788847, -\n 2.48936128616333, 7.907557964324951, 0.02074708230793476, -\n 2.480175495147705, 8.39814567565918, 0.028298325836658478, -\n 2.47098970413208, 8.88873291015625, 0.035849571228027344, -\n 2.3444979190826416, 6.432573318481445, 0.004271554294973612, -\n 2.3353121280670166, 6.923160552978516, 0.00976323802024126, -\n 2.3261263370513916, 7.413747787475586, 0.015254922211170197, -\n 2.3169405460357666, 7.9043354988098145, 0.020746605470776558, -\n 2.3077549934387207, 8.394922256469727, 0.02623829059302807, -\n 2.181262969970703, 5.93876314163208, 0.0028985145036131144, -\n 2.172077178955078, 6.42935037612915, 0.006330638192594051, -\n 2.162891387939453, 6.919937610626221, 0.009762762114405632, -\n 
2.1537058353424072, 7.410524845123291, 0.013194886036217213, -\n 2.1445200443267822, 7.9011125564575195, 0.016627009958028793, -\n 2.0180280208587646, 5.444952964782715, 0.0015254743630066514, -\n 2.0088422298431396, 5.935540199279785, 0.002898038364946842, -\n 1.9996565580368042, 6.4261274337768555, 0.0042706020176410675, \n -1.9904708862304688, 6.916714668273926, 0.00564316613599658, -\n 1.9812850952148438, 7.407302379608154, 0.0070157297886908054],\n 'faces': [1, 6, 1, 0, 5, 1, 7, 2, 1, 6, 1, 8, 3, 2, 7, 1, 9, 4,\n 3, 8, 1, 11, 6, 5, 10, 1, 12, 7, 6, 11, 1, 13, 8, 7, 12, 1, 14,\n 9, 8, 13, 1, 16, 11, 10, 15, 1, 17, 12, 11, 16, 1, 18, 13, 12, \n 17, 1, 19, 14, 13, 18, 1, 21, 16, 15, 20, 1, 22, 17, 16, 21, 1,\n 23, 18, 17, 22, 1, 24, 19, 18, 23]}\n r = self.s.ObjectCreateAsync([obj])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_line_object(self):\n obj = {'type': 'Line', 'value': [-5689.317811503128, -\n 13716.87365524665, 3448.9999880790538, -5688.317811503128, -\n 13717.87365524665, 3539.9999880790538]}\n r = self.s.ObjectCreateAsync([obj])\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_line_objects(self):\n objects = [{'type': 'Line', 'value': [0, 0, 0, 1, 1, 1]}, {'type':\n 'Line', 'value': [-1, -1, -1, 2, 2, 2]}]\n r = self.s.ObjectCreateAsync(objects)\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n print(resource['_id'])\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_update_object(self):\n geometry = {'vertices': [0.0, 1.0, 2.0, 3.0], 'faces': [1, 2, 3]}\n props = {'type': 'RCSlab', 'material': 'Concrete'}\n data = {'properties': props}\n data.update(geometry)\n r = self.s.ObjectUpdateAsync(self.test_object, data)\n self.assertIsNotNone(r)\n self.assertTrue(r['success'])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport sys\nimport os\n#Add project root to path\nsys.path.append('../..')\n\nfrom speckle.SpeckleClient import SpeckleApiClient\n\n\nclass TestSpeckleStream(unittest.TestCase):\n\n def setUp(self):\n\n self.s = SpeckleApiClient()\n self.user = {'email':'[email protected]','password':'testpassword', 'username':'testuser'}\n\n self.test_stream = 'RKWgU-oWF'\n self.test_object = '5bcf2c7e3ff66c15abac431d'\n\n login = self.s.UserLoginAsync(self.user)\n assert login, 'Test User Login was not successful'\n\n self.user['id'] = login['resource']['_id']\n\n self.stream = self.s.StreamGetAsync(self.test_stream)\n obj = self.s.StreamGetObjectsAsync(self.test_stream)\n\n #for o in obj['resources']:\n # r = self.s.ObjectDeleteAsync(o['_id'])\n\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def tearDown(self):\n self.s.StreamUpdateAsync(self.test_stream, self.stream)\n\n def none_msg(self, header):\n return header + ' responded with None'\n \n\n def test_get_object(self):\n r = self.s.ObjectGetAsync(self.test_object)\n\n self.assertIsNotNone(r, self.none_msg('ObjectGetAsync'))\n self.assertTrue(r['success'])\n \n \n def test_create_object(self):\n\n r = self.s.ObjectCreateAsync([{\"owner\": self.user['username']}])\n\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n\n #Check created object ID is in response\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n\n print(resource['_id'])\n\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_create_point_object(self):\n obj = {\n \"owner\": self.user['username'],\n \"type\": \"Point\",\n \"hash\": \"hash\",\n \"value\": [0,0,0]\n }\n\n r = self.s.ObjectCreateAsync([obj])\n\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n\n #Check created object ID is in response\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n\n print(resource['_id'])\n\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_create_mesh_object(self):\n obj = {\n \"owner\": self.user['username'],\n \"type\": \"Mesh\",\n \"geometryHash\": \"Mesh.66ec936fc8eb1844581db685e5672f79\",\n \"hash\": \"2e4d67853709316f17e3745cd700a9ed\",\n \"properties\": {\n \"center\": {\n \"type\": \"Point\",\n \"value\": [\n -2.326136578802356,\n 7.41377889150433,\n 0.01525474415516414\n ],\n \"hash\": \"318e1a3b9bf16bf5711170b61b4cd144\",\n \"geometryHash\": \"Point.8012f72d1fd49795101ab099b7dff3cb\"\n },\n \"area\": 1.6718884716988291,\n \"revitFamTYpe\": \"undefined\"\n },\n \"vertices\": [\n -2.6709675788879395,\n 7.420193672180176,\n 0.007017634343355894,\n -2.6617817878723145,\n 7.910780906677246,\n 0.016628438606858253,\n -2.6525962352752686,\n 8.401368141174316,\n 0.026239242404699326,\n -2.6434104442596436,\n 8.891955375671387,\n 0.03585004433989525,\n -2.6342246532440186,\n 9.382542610168457,\n 0.04546085000038147,\n -2.507732629776001,\n 6.9263834953308105,\n 0.005644594319164753,\n -2.498547077178955,\n 7.416970729827881,\n 0.01319583784788847,\n -2.48936128616333,\n 7.907557964324951,\n 0.02074708230793476,\n -2.480175495147705,\n 8.39814567565918,\n 0.028298325836658478,\n -2.47098970413208,\n 8.88873291015625,\n 0.035849571228027344,\n -2.3444979190826416,\n 6.432573318481445,\n 0.004271554294973612,\n -2.3353121280670166,\n 6.923160552978516,\n 0.00976323802024126,\n -2.3261263370513916,\n 7.413747787475586,\n 
0.015254922211170197,\n -2.3169405460357666,\n 7.9043354988098145,\n 0.020746605470776558,\n -2.3077549934387207,\n 8.394922256469727,\n 0.02623829059302807,\n -2.181262969970703,\n 5.93876314163208,\n 0.0028985145036131144,\n -2.172077178955078,\n 6.42935037612915,\n 0.006330638192594051,\n -2.162891387939453,\n 6.919937610626221,\n 0.009762762114405632,\n -2.1537058353424072,\n 7.410524845123291,\n 0.013194886036217213,\n -2.1445200443267822,\n 7.9011125564575195,\n 0.016627009958028793,\n -2.0180280208587646,\n 5.444952964782715,\n 0.0015254743630066514,\n -2.0088422298431396,\n 5.935540199279785,\n 0.002898038364946842,\n -1.9996565580368042,\n 6.4261274337768555,\n 0.0042706020176410675,\n -1.9904708862304688,\n 6.916714668273926,\n 0.00564316613599658,\n -1.9812850952148438,\n 7.407302379608154,\n 0.0070157297886908054\n ],\n \"faces\": [\n 1,\n 6,\n 1,\n 0,\n 5,\n 1,\n 7,\n 2,\n 1,\n 6,\n 1,\n 8,\n 3,\n 2,\n 7,\n 1,\n 9,\n 4,\n 3,\n 8,\n 1,\n 11,\n 6,\n 5,\n 10,\n 1,\n 12,\n 7,\n 6,\n 11,\n 1,\n 13,\n 8,\n 7,\n 12,\n 1,\n 14,\n 9,\n 8,\n 13,\n 1,\n 16,\n 11,\n 10,\n 15,\n 1,\n 17,\n 12,\n 11,\n 16,\n 1,\n 18,\n 13,\n 12,\n 17,\n 1,\n 19,\n 14,\n 13,\n 18,\n 1,\n 21,\n 16,\n 15,\n 20,\n 1,\n 22,\n 17,\n 16,\n 21,\n 1,\n 23,\n 18,\n 17,\n 22,\n 1,\n 24,\n 19,\n 18,\n 23\n ]\n }\n\n r = self.s.ObjectCreateAsync([obj])\n\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n\n # Check created object ID is in response\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n\n print(resource['_id'])\n\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_line_object(self):\n obj = {\n \"type\": \"Line\",\n \"value\": [\n -5689.317811503128,\n -13716.87365524665,\n 3448.9999880790538,\n -5688.317811503128,\n -13717.87365524665,\n 3539.9999880790538\n ],\n }\n\n r = self.s.ObjectCreateAsync([obj])\n\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n\n # Check created object ID is in response\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n\n print(resource['_id'])\n\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n def test_line_objects(self):\n objects = [\n {\n \"type\": \"Line\",\n \"value\": [\n 0,\n 0,\n 0,\n 1,\n 1,\n 1\n ],\n },\n {\n \"type\": \"Line\",\n \"value\": [\n -1,\n -1,\n -1,\n 2,\n 2,\n 2\n ],\n },\n ]\n r = self.s.ObjectCreateAsync(objects)\n\n self.assertIsNotNone(r, self.none_msg('ObjectCreateAsync'))\n self.assertTrue(r['success'])\n self.assertTrue(r['resources'])\n\n # Check created object ID is in response\n resource = r['resources'][0]\n self.assertTrue(resource['_id'])\n\n print(resource['_id'])\n\n self.s.StreamAddObjectAsync(self.test_stream, resource['_id'])\n\n\n\n\n def test_update_object(self):\n \n geometry = {\n \"vertices\": [0.0, 1.0, 2.0, 3.0],\n \"faces\": [1,2,3]\n }\n\n props = {\n 'type': 'RCSlab', \n 'material': 'Concrete'\n }\n data = {'properties': props}\n data.update(geometry)\n r = self.s.ObjectUpdateAsync(self.test_object, data)\n self.assertIsNotNone(r)\n\n #Todo: Look into why user is not authorized to update\n self.assertTrue(r['success'])\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
7,
11,
12,
13,
14
]
}
|
[
7,
11,
12,
13,
14
] |
def lucas():
yield 2
a = 2
b = 1
while True:
yield b
a, b = b, a + b
l = lucas()
for i in range(10):
print('{}: {}'.format(i, next(l)))
|
normal
|
{
"blob_id": "4745c00ca0f3ca4316117228a9d44bdb5df02877",
"index": 7799,
"step-1": "<mask token>\n",
"step-2": "def lucas():\n yield 2\n a = 2\n b = 1\n while True:\n yield b\n a, b = b, a + b\n\n\n<mask token>\n",
"step-3": "def lucas():\n yield 2\n a = 2\n b = 1\n while True:\n yield b\n a, b = b, a + b\n\n\n<mask token>\nfor i in range(10):\n print('{}: {}'.format(i, next(l)))\n",
"step-4": "def lucas():\n yield 2\n a = 2\n b = 1\n while True:\n yield b\n a, b = b, a + b\n\n\nl = lucas()\nfor i in range(10):\n print('{}: {}'.format(i, next(l)))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def solution(S):
# write your code in Python 3.6
# Definitions
log_sep = ','
num_sep = '-'
time_sep = ':'
# Initialization
from collections import defaultdict
# defaultdict initialize missing key to default value -> 0
bill = defaultdict(int)
total = defaultdict(int)
calls = S.splitlines()
maximal = 0
free_number = 0
for call in calls:
# Parsing values
hhmmss, number = call.split(log_sep)
hh, mm, ss = hhmmss.split(time_sep)
hh, mm, ss = int(hh), int(mm), int(ss)
number = int(number.replace(num_sep,''))
# Call duration calculations
minutes = mm + hh * 60
seconds = ss + minutes * 60
# Free number Rule
total[number] += seconds
if total[number] > maximal:
# new maximal
maximal = total[number]
free_number = number
elif total[number] == maximal:
# in case of a tie...
free_number = min(number,free_number)
# Billing Rule
if minutes < 5:
bill[number] += seconds * 3
else:
if ss > 0:
started = 1
else:
started = 0
bill[number] += (minutes + started) * 150
# Free number Rule enforcement
bill[free_number] = 0
return sum(bill.values())
|
normal
|
{
"blob_id": "bf8bbeb408cb75af314ef9f3907456036e731c0b",
"index": 294,
"step-1": "<mask token>\n",
"step-2": "def solution(S):\n log_sep = ','\n num_sep = '-'\n time_sep = ':'\n from collections import defaultdict\n bill = defaultdict(int)\n total = defaultdict(int)\n calls = S.splitlines()\n maximal = 0\n free_number = 0\n for call in calls:\n hhmmss, number = call.split(log_sep)\n hh, mm, ss = hhmmss.split(time_sep)\n hh, mm, ss = int(hh), int(mm), int(ss)\n number = int(number.replace(num_sep, ''))\n minutes = mm + hh * 60\n seconds = ss + minutes * 60\n total[number] += seconds\n if total[number] > maximal:\n maximal = total[number]\n free_number = number\n elif total[number] == maximal:\n free_number = min(number, free_number)\n if minutes < 5:\n bill[number] += seconds * 3\n else:\n if ss > 0:\n started = 1\n else:\n started = 0\n bill[number] += (minutes + started) * 150\n bill[free_number] = 0\n return sum(bill.values())\n",
"step-3": "def solution(S):\n # write your code in Python 3.6\n # Definitions\n log_sep = ','\n num_sep = '-'\n time_sep = ':'\n # Initialization\n from collections import defaultdict\n # defaultdict initialize missing key to default value -> 0\n bill = defaultdict(int)\n total = defaultdict(int)\n calls = S.splitlines()\n maximal = 0\n free_number = 0\n \n for call in calls:\n # Parsing values\n hhmmss, number = call.split(log_sep)\n hh, mm, ss = hhmmss.split(time_sep)\n hh, mm, ss = int(hh), int(mm), int(ss)\n number = int(number.replace(num_sep,''))\n # Call duration calculations\n minutes = mm + hh * 60\n seconds = ss + minutes * 60\n # Free number Rule\n total[number] += seconds\n if total[number] > maximal:\n # new maximal\n maximal = total[number]\n free_number = number\n elif total[number] == maximal:\n # in case of a tie...\n free_number = min(number,free_number)\n # Billing Rule\n if minutes < 5:\n bill[number] += seconds * 3\n else:\n if ss > 0:\n started = 1\n else:\n started = 0\n bill[number] += (minutes + started) * 150\n # Free number Rule enforcement\n bill[free_number] = 0\n return sum(bill.values())\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import time

from data_structures.datacenter import Datacenter, urllib, json

URL = "http://www.mocky.io/v2/5e539b332e00007c002dacbe"


def get_data(url, max_retries=5, delay_between_retries=1):
    """
    Fetch the data from http://www.mocky.io/v2/5e539b332e00007c002dacbe
    and return it as a JSON object.

    Args:
        url (str): The url to be fetched.
        max_retries (int): Number of retries.
        delay_between_retries (int): Delay between retries in seconds.
    Returns:
        data (dict)
    """
    for _ in range(max_retries):
        try:
            response = urllib.request.urlopen(url)
            data = json.loads(response.read())
            return data
        except Exception:
            # wait before retrying the failed request
            time.sleep(delay_between_retries)
    return None
def main():
"""
Main entry to our program.
"""
data = get_data(URL)
if not data:
raise ValueError('No data to process')
datacenters = [
Datacenter(key, value)
for key, value in data.items()
]
pass # the rest of your logic here
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "e56a7912b9940b1cab6c19d0047f1f60f0083f66",
"index": 4911,
"step-1": "from data_structures.datacenter import Datacenter, urllib, json,\n\n\nURL = \"http://www.mocky.io/v2/5e539b332e00007c002dacbe\"\n\n\ndef get_data(url, max_retries=5, delay_between_retries=1):\n \"\"\"\n Fetch the data from http://www.mocky.io/v2/5e539b332e00007c002dacbe\n and return it as a JSON object.\n\n Args:\n url (str): The url to be fetched.\n max_retries (int): Number of retries.\n delay_between_retries (int): Delay between retries in seconds.\n Returns:\n data (dict)\n \"\"\"\n pass # the rest of your logic here\n for i in max_retries:\n while True:\n try\n time.sleep(delay_between_tries)\n response = urllib.request.urlopen(url)\n data = json.loads(response.read())\n print (data)\n break\n except Exception:\n continue\n \n \n \n\n\n\n\n\n\ndef main():\n \"\"\"\n Main entry to our program.\n \"\"\"\n\n data = get_data(URL)\n\n if not data:\n raise ValueError('No data to process')\n\n datacenters = [\n Datacenter(key, value)\n for key, value in data.items()\n ]\n\n pass # the rest of your logic here\n\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: Jack
@datetime: 2018/8/31 13:32
@E-mail: [email protected]
"""
def isValid(s):
stack = []
for ss in s:
if ss in '([{':
stack.append(ss)
if ss in ')]}':
if len(stack) <= 0:
return False
else:
compare = stack.pop()
if (compare == '(' and ss != ')') or (compare == '[' and ss != ']') or (compare == '{' and ss != '}'):
return False
if len(stack) == 0:
return True
else:
return False
if __name__ == '__main__':
print isValid("{[]}")
|
normal
|
{
"blob_id": "607f0aac0d6d2c05737f59803befcff37d559398",
"index": 5117,
"step-1": "#!usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: Jack\n@datetime: 2018/8/31 13:32\n@E-mail: [email protected]\n\"\"\"\n\n\ndef isValid(s):\n stack = []\n for ss in s:\n if ss in '([{':\n stack.append(ss)\n if ss in ')]}':\n if len(stack) <= 0:\n return False\n else:\n compare = stack.pop()\n if (compare == '(' and ss != ')') or (compare == '[' and ss != ']') or (compare == '{' and ss != '}'):\n return False\n if len(stack) == 0:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n print isValid(\"{[]}\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
version https://git-lfs.github.com/spec/v1
oid sha256:7f0b7267333e6a4a73d3df0ee7f384f7b3cb6ffb14ed2dc8a5894b853bac8957
size 1323
|
normal
|
{
"blob_id": "f1972baee8b399c9a52561c8f015f71cb9922bb0",
"index": 4875,
"step-1": "version https://git-lfs.github.com/spec/v1\noid sha256:7f0b7267333e6a4a73d3df0ee7f384f7b3cb6ffb14ed2dc8a5894b853bac8957\nsize 1323\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Flask
from flask import render_template
import datetime
from person import Person
import requests
from post import Post
app = Flask(__name__)
all_posts = requests.get(
"https://api.npoint.io/5abcca6f4e39b4955965").json()
post_objects = []
for post in all_posts:
post_obj = Post(post["id"], post["title"], post["subtitle"], post["body"])
post_objects.append(post_obj)
@app.route('/')
def home_page():
year = datetime.datetime.today().year
return render_template("index.html",
current_year=year)
@app.route('/guess/<name>')
def guesser(name):
person = Person(name=name)
return render_template("guess.html",
name=person.name,
gender=person.gender,
age=person.age,
country=person.country,
)
@app.route('/blog')
def blog():
return render_template("blog.html", posts=post_objects)
@app.route('/post/<int:id>')
def blog_post(id):
requested_post = None
for post in post_objects:
if post.id == id:
requested_post = post
return render_template("post.html", post=requested_post)
if __name__ == "__main__":
app.run(debug=True)
|
normal
|
{
"blob_id": "895ece0b8d45cd64e43f8ddc54824f7647254185",
"index": 2547,
"step-1": "<mask token>\n\n\[email protected]('/guess/<name>')\ndef guesser(name):\n person = Person(name=name)\n return render_template('guess.html', name=person.name, gender=person.\n gender, age=person.age, country=person.country)\n\n\n<mask token>\n\n\[email protected]('/post/<int:id>')\ndef blog_post(id):\n requested_post = None\n for post in post_objects:\n if post.id == id:\n requested_post = post\n return render_template('post.html', post=requested_post)\n\n\n<mask token>\n",
"step-2": "<mask token>\nfor post in all_posts:\n post_obj = Post(post['id'], post['title'], post['subtitle'], post['body'])\n post_objects.append(post_obj)\n\n\[email protected]('/')\ndef home_page():\n year = datetime.datetime.today().year\n return render_template('index.html', current_year=year)\n\n\[email protected]('/guess/<name>')\ndef guesser(name):\n person = Person(name=name)\n return render_template('guess.html', name=person.name, gender=person.\n gender, age=person.age, country=person.country)\n\n\[email protected]('/blog')\ndef blog():\n return render_template('blog.html', posts=post_objects)\n\n\[email protected]('/post/<int:id>')\ndef blog_post(id):\n requested_post = None\n for post in post_objects:\n if post.id == id:\n requested_post = post\n return render_template('post.html', post=requested_post)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\nall_posts = all_posts = requests.get(\n 'https://api.npoint.io/5abcca6f4e39b4955965').json()\npost_objects = []\nfor post in all_posts:\n post_obj = Post(post['id'], post['title'], post['subtitle'], post['body'])\n post_objects.append(post_obj)\n\n\[email protected]('/')\ndef home_page():\n year = datetime.datetime.today().year\n return render_template('index.html', current_year=year)\n\n\[email protected]('/guess/<name>')\ndef guesser(name):\n person = Person(name=name)\n return render_template('guess.html', name=person.name, gender=person.\n gender, age=person.age, country=person.country)\n\n\[email protected]('/blog')\ndef blog():\n return render_template('blog.html', posts=post_objects)\n\n\[email protected]('/post/<int:id>')\ndef blog_post(id):\n requested_post = None\n for post in post_objects:\n if post.id == id:\n requested_post = post\n return render_template('post.html', post=requested_post)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask\nfrom flask import render_template\nimport datetime\nfrom person import Person\nimport requests\nfrom post import Post\napp = Flask(__name__)\nall_posts = all_posts = requests.get(\n 'https://api.npoint.io/5abcca6f4e39b4955965').json()\npost_objects = []\nfor post in all_posts:\n post_obj = Post(post['id'], post['title'], post['subtitle'], post['body'])\n post_objects.append(post_obj)\n\n\[email protected]('/')\ndef home_page():\n year = datetime.datetime.today().year\n return render_template('index.html', current_year=year)\n\n\[email protected]('/guess/<name>')\ndef guesser(name):\n person = Person(name=name)\n return render_template('guess.html', name=person.name, gender=person.\n gender, age=person.age, country=person.country)\n\n\[email protected]('/blog')\ndef blog():\n return render_template('blog.html', posts=post_objects)\n\n\[email protected]('/post/<int:id>')\ndef blog_post(id):\n requested_post = None\n for post in post_objects:\n if post.id == id:\n requested_post = post\n return render_template('post.html', post=requested_post)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask\nfrom flask import render_template\nimport datetime\nfrom person import Person\nimport requests\nfrom post import Post\n\napp = Flask(__name__)\nall_posts = all_posts = requests.get(\n \"https://api.npoint.io/5abcca6f4e39b4955965\").json()\npost_objects = []\n\nfor post in all_posts:\n post_obj = Post(post[\"id\"], post[\"title\"], post[\"subtitle\"], post[\"body\"])\n post_objects.append(post_obj)\n\n\[email protected]('/')\ndef home_page():\n year = datetime.datetime.today().year\n return render_template(\"index.html\",\n current_year=year)\n\n\[email protected]('/guess/<name>')\ndef guesser(name):\n person = Person(name=name)\n return render_template(\"guess.html\",\n name=person.name,\n gender=person.gender,\n age=person.age,\n country=person.country,\n )\n\n\[email protected]('/blog')\ndef blog():\n return render_template(\"blog.html\", posts=post_objects)\n\n\[email protected]('/post/<int:id>')\ndef blog_post(id):\n requested_post = None\n for post in post_objects:\n if post.id == id:\n requested_post = post\n return render_template(\"post.html\", post=requested_post)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def main():
reader = csv.reader(row for row in fileinput.input() if not row.
startswith('#'))
circles = lps.parse_lps(reader)
for circle in circles:
circle.r = R
print(circle)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('inputfile', help=
'if specified reads a *.lp formatted file otherwise standard in')
<|reserved_special_token_0|>
def main():
reader = csv.reader(row for row in fileinput.input() if not row.
startswith('#'))
circles = lps.parse_lps(reader)
for circle in circles:
circle.r = R
print(circle)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser = argparse.ArgumentParser(description=
'Takes an input of *.lp format and sets all radii to the same value')
parser.add_argument('inputfile', help=
'if specified reads a *.lp formatted file otherwise standard in')
R = 1
def main():
reader = csv.reader(row for row in fileinput.input() if not row.
startswith('#'))
circles = lps.parse_lps(reader)
for circle in circles:
circle.r = R
print(circle)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import sys
import csv
import math
import collections
import argparse
import fileinput
import lp
parser = argparse.ArgumentParser(description=
'Takes an input of *.lp format and sets all radii to the same value')
parser.add_argument('inputfile', help=
'if specified reads a *.lp formatted file otherwise standard in')
R = 1
def main():
reader = csv.reader(row for row in fileinput.input() if not row.
startswith('#'))
circles = lps.parse_lps(reader)
for circle in circles:
circle.r = R
print(circle)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
import sys
import csv
import math
import collections
import argparse
import fileinput
import lp
parser = argparse.ArgumentParser(description="Takes an input of *.lp format and sets all radii to the same value")
parser.add_argument("inputfile", help="if specified reads a *.lp formatted file otherwise standard in")
R = 1
def main():
reader = csv.reader(row for row in fileinput.input() if not row.startswith('#'))
circles = lps.parse_lps(reader)
for circle in circles:
circle.r = R
print(circle)
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "00f62fec7f5372c5798b0ebf3f3783233360581e",
"index": 2987,
"step-1": "<mask token>\n\n\ndef main():\n reader = csv.reader(row for row in fileinput.input() if not row.\n startswith('#'))\n circles = lps.parse_lps(reader)\n for circle in circles:\n circle.r = R\n print(circle)\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('inputfile', help=\n 'if specified reads a *.lp formatted file otherwise standard in')\n<mask token>\n\n\ndef main():\n reader = csv.reader(row for row in fileinput.input() if not row.\n startswith('#'))\n circles = lps.parse_lps(reader)\n for circle in circles:\n circle.r = R\n print(circle)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser(description=\n 'Takes an input of *.lp format and sets all radii to the same value')\nparser.add_argument('inputfile', help=\n 'if specified reads a *.lp formatted file otherwise standard in')\nR = 1\n\n\ndef main():\n reader = csv.reader(row for row in fileinput.input() if not row.\n startswith('#'))\n circles = lps.parse_lps(reader)\n for circle in circles:\n circle.r = R\n print(circle)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nimport csv\nimport math\nimport collections\nimport argparse\nimport fileinput\nimport lp\nparser = argparse.ArgumentParser(description=\n 'Takes an input of *.lp format and sets all radii to the same value')\nparser.add_argument('inputfile', help=\n 'if specified reads a *.lp formatted file otherwise standard in')\nR = 1\n\n\ndef main():\n reader = csv.reader(row for row in fileinput.input() if not row.\n startswith('#'))\n circles = lps.parse_lps(reader)\n for circle in circles:\n circle.r = R\n print(circle)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\nimport sys\nimport csv\nimport math\n\nimport collections\nimport argparse\nimport fileinput\n\nimport lp\n\nparser = argparse.ArgumentParser(description=\"Takes an input of *.lp format and sets all radii to the same value\")\nparser.add_argument(\"inputfile\", help=\"if specified reads a *.lp formatted file otherwise standard in\")\n\nR = 1\n\ndef main():\n reader = csv.reader(row for row in fileinput.input() if not row.startswith('#'))\n\n circles = lps.parse_lps(reader)\n\n for circle in circles:\n circle.r = R\n print(circle)\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if d == m:
print(a[0])
elif 0 < d < m:
for i in range(hmin, hmax + 1):
fin1 = a[0] - i + m
if hmin <= fin1 - a[-1] <= hmax or fin1 == a[-1]:
print(a[0] - i)
found = 1
break
if found == 0:
i = 0
while i < n - 1:
found = 0
invalid = 0
d = a[i + 1] - a[i]
print(a[i], a[i + 1], d)
if d < hmin or d > hmax:
i = i + 1
continue
for j in range(i + 1, n):
d = a[j] - a[j - 1]
print(a[i], a[j], d)
if d < hmin or d > hmax:
i = j - 1
invalid = 1
break
if a[j] - a[i] > m:
invalid = 1
break
if a[j] - a[i] == m:
found = 1
invalid = 0
break
if invalid == 1:
i = i + 1
continue
if found == 1 or a[-1] - a[i] + hmin <= m and a[-1] - a[i] + hmax >= m:
print(a[i])
break
i = i + 1
if n == 1:
print(a[0] + hmax - m)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
n = int(input().strip())
a = list(input().strip().split(' '))
H = list(input().strip().split(' '))
a = [int(i) for i in a]
m = int(H[0])
hmin = int(H[1])
hmax = int(H[2])
pos = 0
found = 0
d = a[-1] - a[0]
if d == m:
print(a[0])
elif 0 < d < m:
for i in range(hmin, hmax + 1):
fin1 = a[0] - i + m
if hmin <= fin1 - a[-1] <= hmax or fin1 == a[-1]:
print(a[0] - i)
found = 1
break
if found == 0:
i = 0
while i < n - 1:
found = 0
invalid = 0
d = a[i + 1] - a[i]
print(a[i], a[i + 1], d)
if d < hmin or d > hmax:
i = i + 1
continue
for j in range(i + 1, n):
d = a[j] - a[j - 1]
print(a[i], a[j], d)
if d < hmin or d > hmax:
i = j - 1
invalid = 1
break
if a[j] - a[i] > m:
invalid = 1
break
if a[j] - a[i] == m:
found = 1
invalid = 0
break
if invalid == 1:
i = i + 1
continue
if found == 1 or a[-1] - a[i] + hmin <= m and a[-1] - a[i] + hmax >= m:
print(a[i])
break
i = i + 1
if n == 1:
print(a[0] + hmax - m)
<|reserved_special_token_1|>
import sys
n = int(input().strip())
a = list(input().strip().split(' '))
H = list(input().strip().split(' '))
a = [int(i) for i in a]
m = int(H[0])
hmin = int(H[1])
hmax = int(H[2])
pos = 0
found = 0
d = a[-1] - a[0]
if d == m:
print(a[0])
elif 0 < d < m:
for i in range(hmin, hmax + 1):
fin1 = a[0] - i + m
if hmin <= fin1 - a[-1] <= hmax or fin1 == a[-1]:
print(a[0] - i)
found = 1
break
if found == 0:
i = 0
while i < n - 1:
found = 0
invalid = 0
d = a[i + 1] - a[i]
print(a[i], a[i + 1], d)
if d < hmin or d > hmax:
i = i + 1
continue
for j in range(i + 1, n):
d = a[j] - a[j - 1]
print(a[i], a[j], d)
if d < hmin or d > hmax:
i = j - 1
invalid = 1
break
if a[j] - a[i] > m:
invalid = 1
break
if a[j] - a[i] == m:
found = 1
invalid = 0
break
if invalid == 1:
i = i + 1
continue
if found == 1 or a[-1] - a[i] + hmin <= m and a[-1] - a[i] + hmax >= m:
print(a[i])
break
i = i + 1
if n == 1:
print(a[0] + hmax - m)
<|reserved_special_token_1|>
import sys
n=int(input().strip())
a=list(input().strip().split(' '))
H=list(input().strip().split(' '))
a = [int(i) for i in a]
m=int(H[0])
hmin=int(H[1])
hmax=int(H[2])
pos=0
found = 0
d=a[-1]-a[0]
if(d==m):
print(a[0])
elif(0<d<m):
for i in range(hmin, hmax+1):
fin1 = a[0]-i+m
if(hmin<=fin1-a[-1]<=hmax or fin1==a[-1]):
print(a[0]-i)
found = 1
break
if(found == 0):
i = 0
while(i<(n-1)):
found = 0
invalid = 0
d = a[i+1]-a[i]
print(a[i], a[i+1], d)
if(d<hmin or d>hmax):
i=i+1
continue
for j in range(i+1, n):
d = a[j]-a[j-1]
print(a[i], a[j], d)
if(d<hmin or d>hmax):
i = j-1
invalid = 1
break
if(a[j]-a[i]>m):
invalid = 1
break
if(a[j]-a[i]==m):
found = 1
invalid = 0
break
if(invalid == 1):
i = i+1
continue
if(found == 1 or (a[-1]-a[i]+hmin<=m and a[-1]-a[i]+hmax>=m)):
print(a[i])
break
i = i+1
if(n == 1):
print(a[0]+hmax-m)
|
flexible
|
{
"blob_id": "3da82bcff0a4f91c1245892bc01e9f743ea354a8",
"index": 4484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif d == m:\n print(a[0])\nelif 0 < d < m:\n for i in range(hmin, hmax + 1):\n fin1 = a[0] - i + m\n if hmin <= fin1 - a[-1] <= hmax or fin1 == a[-1]:\n print(a[0] - i)\n found = 1\n break\nif found == 0:\n i = 0\n while i < n - 1:\n found = 0\n invalid = 0\n d = a[i + 1] - a[i]\n print(a[i], a[i + 1], d)\n if d < hmin or d > hmax:\n i = i + 1\n continue\n for j in range(i + 1, n):\n d = a[j] - a[j - 1]\n print(a[i], a[j], d)\n if d < hmin or d > hmax:\n i = j - 1\n invalid = 1\n break\n if a[j] - a[i] > m:\n invalid = 1\n break\n if a[j] - a[i] == m:\n found = 1\n invalid = 0\n break\n if invalid == 1:\n i = i + 1\n continue\n if found == 1 or a[-1] - a[i] + hmin <= m and a[-1] - a[i] + hmax >= m:\n print(a[i])\n break\n i = i + 1\nif n == 1:\n print(a[0] + hmax - m)\n",
"step-3": "<mask token>\nn = int(input().strip())\na = list(input().strip().split(' '))\nH = list(input().strip().split(' '))\na = [int(i) for i in a]\nm = int(H[0])\nhmin = int(H[1])\nhmax = int(H[2])\npos = 0\nfound = 0\nd = a[-1] - a[0]\nif d == m:\n print(a[0])\nelif 0 < d < m:\n for i in range(hmin, hmax + 1):\n fin1 = a[0] - i + m\n if hmin <= fin1 - a[-1] <= hmax or fin1 == a[-1]:\n print(a[0] - i)\n found = 1\n break\nif found == 0:\n i = 0\n while i < n - 1:\n found = 0\n invalid = 0\n d = a[i + 1] - a[i]\n print(a[i], a[i + 1], d)\n if d < hmin or d > hmax:\n i = i + 1\n continue\n for j in range(i + 1, n):\n d = a[j] - a[j - 1]\n print(a[i], a[j], d)\n if d < hmin or d > hmax:\n i = j - 1\n invalid = 1\n break\n if a[j] - a[i] > m:\n invalid = 1\n break\n if a[j] - a[i] == m:\n found = 1\n invalid = 0\n break\n if invalid == 1:\n i = i + 1\n continue\n if found == 1 or a[-1] - a[i] + hmin <= m and a[-1] - a[i] + hmax >= m:\n print(a[i])\n break\n i = i + 1\nif n == 1:\n print(a[0] + hmax - m)\n",
"step-4": "import sys\nn = int(input().strip())\na = list(input().strip().split(' '))\nH = list(input().strip().split(' '))\na = [int(i) for i in a]\nm = int(H[0])\nhmin = int(H[1])\nhmax = int(H[2])\npos = 0\nfound = 0\nd = a[-1] - a[0]\nif d == m:\n print(a[0])\nelif 0 < d < m:\n for i in range(hmin, hmax + 1):\n fin1 = a[0] - i + m\n if hmin <= fin1 - a[-1] <= hmax or fin1 == a[-1]:\n print(a[0] - i)\n found = 1\n break\nif found == 0:\n i = 0\n while i < n - 1:\n found = 0\n invalid = 0\n d = a[i + 1] - a[i]\n print(a[i], a[i + 1], d)\n if d < hmin or d > hmax:\n i = i + 1\n continue\n for j in range(i + 1, n):\n d = a[j] - a[j - 1]\n print(a[i], a[j], d)\n if d < hmin or d > hmax:\n i = j - 1\n invalid = 1\n break\n if a[j] - a[i] > m:\n invalid = 1\n break\n if a[j] - a[i] == m:\n found = 1\n invalid = 0\n break\n if invalid == 1:\n i = i + 1\n continue\n if found == 1 or a[-1] - a[i] + hmin <= m and a[-1] - a[i] + hmax >= m:\n print(a[i])\n break\n i = i + 1\nif n == 1:\n print(a[0] + hmax - m)\n",
"step-5": "import sys\n\nn=int(input().strip())\na=list(input().strip().split(' '))\nH=list(input().strip().split(' '))\na = [int(i) for i in a]\nm=int(H[0])\nhmin=int(H[1])\nhmax=int(H[2])\npos=0\nfound = 0\nd=a[-1]-a[0]\nif(d==m):\n print(a[0])\nelif(0<d<m):\n for i in range(hmin, hmax+1):\n fin1 = a[0]-i+m\n if(hmin<=fin1-a[-1]<=hmax or fin1==a[-1]):\n print(a[0]-i)\n found = 1\n break\nif(found == 0):\n i = 0 \n while(i<(n-1)):\n found = 0\n invalid = 0\n d = a[i+1]-a[i]\n print(a[i], a[i+1], d)\n if(d<hmin or d>hmax):\n i=i+1\n continue\n for j in range(i+1, n):\n d = a[j]-a[j-1]\n print(a[i], a[j], d)\n if(d<hmin or d>hmax):\n i = j-1\n invalid = 1\n break\n if(a[j]-a[i]>m):\n invalid = 1\n break\n if(a[j]-a[i]==m):\n found = 1\n invalid = 0\n break\n if(invalid == 1):\n i = i+1\n continue\n if(found == 1 or (a[-1]-a[i]+hmin<=m and a[-1]-a[i]+hmax>=m)): \n print(a[i])\n break\n i = i+1\nif(n == 1):\n print(a[0]+hmax-m)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@transaction.atomic
def computers(request):
ctx = {}
computer = Computer.objects.all()
ctx['brand'] = Brand.objects.all()
if request.method == 'POST':
if request.POST['computer_id'] != '':
computer = computer.filter(computer_id__icontains=request.POST[
'computer_id'])
if request.POST['cpu'] != '':
computer = computer.filter(cpu__icontains=request.POST['cpu'])
if request.POST['graphics_card'] != '':
computer = computer.filter(graphics_card__icontains=request.
POST['graphics_card'])
try:
if request.POST['minMemory'] != '':
computer = computer.filter(memory__gte=int(request.POST[
'minMemory']))
if request.POST['maxMemory'] != '':
computer = computer.exclude(memory__gte=int(request.POST[
'maxMemory']))
if request.POST['minssd'] != '':
computer = computer.filter(ssd_capacity__gte=int(request.
POST['minssd']))
if request.POST['maxssd'] != '':
computer = computer.exclude(ssd_capacity__gte=int(request.
POST['maxssd']))
if request.POST['minDisk'] != '':
computer = computer.filter(disk_capacity__gte=int(request.
POST['minDisk']))
if request.POST['maxDisk'] != '':
computer = computer.exclude(disk_capacity__gte=int(request.
POST['maxDisk']))
except ValueError:
return render(request, 'Dashio/error.html', {'error': '请输入整数'})
if request.POST.get('brand', '') != '':
print(request.POST['brand'])
computer = computer.filter(brand__name__icontains=request.POST[
'brand'])
if request.POST['sort'] != '':
sortKey = request.POST['sortType'] + request.POST['sort']
computer = computer.order_by(sortKey)
ctx['computer'] = computer
return render(request, 'Dashio/computers.html', ctx)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@transaction.atomic
def computers(request):
ctx = {}
computer = Computer.objects.all()
ctx['brand'] = Brand.objects.all()
if request.method == 'POST':
if request.POST['computer_id'] != '':
computer = computer.filter(computer_id__icontains=request.POST[
'computer_id'])
if request.POST['cpu'] != '':
computer = computer.filter(cpu__icontains=request.POST['cpu'])
if request.POST['graphics_card'] != '':
computer = computer.filter(graphics_card__icontains=request.
POST['graphics_card'])
try:
if request.POST['minMemory'] != '':
computer = computer.filter(memory__gte=int(request.POST[
'minMemory']))
if request.POST['maxMemory'] != '':
computer = computer.exclude(memory__gte=int(request.POST[
'maxMemory']))
if request.POST['minssd'] != '':
computer = computer.filter(ssd_capacity__gte=int(request.
POST['minssd']))
if request.POST['maxssd'] != '':
computer = computer.exclude(ssd_capacity__gte=int(request.
POST['maxssd']))
if request.POST['minDisk'] != '':
computer = computer.filter(disk_capacity__gte=int(request.
POST['minDisk']))
if request.POST['maxDisk'] != '':
computer = computer.exclude(disk_capacity__gte=int(request.
POST['maxDisk']))
except ValueError:
return render(request, 'Dashio/error.html', {'error': '请输入整数'})
if request.POST.get('brand', '') != '':
print(request.POST['brand'])
computer = computer.filter(brand__name__icontains=request.POST[
'brand'])
if request.POST['sort'] != '':
sortKey = request.POST['sortType'] + request.POST['sort']
computer = computer.order_by(sortKey)
ctx['computer'] = computer
return render(request, 'Dashio/computers.html', ctx)
<|reserved_special_token_0|>
@transaction.atomic
def post(request, user_id, computer_id):
if request.method == 'POST':
computer = Computer.objects.get(pk=computer_id)
user = User.objects.get(pk=user_id)
computer_comment(computer_id=computer, user_id=user, content=
request.POST['comment']).save()
return HttpResponseRedirect(reverse('shop:computerDetail', args=(
computer_id,)))
def makeMark(request, computer_id, user_id):
try:
m = mark.objects.get(computer_id__computer_id=computer_id,
user_id__user_id=user_id)
m.delete()
except ObjectDoesNotExist:
computer = get_object_or_404(Computer, pk=computer_id)
user = get_object_or_404(User, pk=user_id)
mark(computer_id=computer, user_id=user).save()
return HttpResponseRedirect(reverse('shop:computerDetail', args=(
computer_id,)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@transaction.atomic
def computers(request):
ctx = {}
computer = Computer.objects.all()
ctx['brand'] = Brand.objects.all()
if request.method == 'POST':
if request.POST['computer_id'] != '':
computer = computer.filter(computer_id__icontains=request.POST[
'computer_id'])
if request.POST['cpu'] != '':
computer = computer.filter(cpu__icontains=request.POST['cpu'])
if request.POST['graphics_card'] != '':
computer = computer.filter(graphics_card__icontains=request.
POST['graphics_card'])
try:
if request.POST['minMemory'] != '':
computer = computer.filter(memory__gte=int(request.POST[
'minMemory']))
if request.POST['maxMemory'] != '':
computer = computer.exclude(memory__gte=int(request.POST[
'maxMemory']))
if request.POST['minssd'] != '':
computer = computer.filter(ssd_capacity__gte=int(request.
POST['minssd']))
if request.POST['maxssd'] != '':
computer = computer.exclude(ssd_capacity__gte=int(request.
POST['maxssd']))
if request.POST['minDisk'] != '':
computer = computer.filter(disk_capacity__gte=int(request.
POST['minDisk']))
if request.POST['maxDisk'] != '':
computer = computer.exclude(disk_capacity__gte=int(request.
POST['maxDisk']))
except ValueError:
return render(request, 'Dashio/error.html', {'error': '请输入整数'})
if request.POST.get('brand', '') != '':
print(request.POST['brand'])
computer = computer.filter(brand__name__icontains=request.POST[
'brand'])
if request.POST['sort'] != '':
sortKey = request.POST['sortType'] + request.POST['sort']
computer = computer.order_by(sortKey)
ctx['computer'] = computer
return render(request, 'Dashio/computers.html', ctx)
@transaction.atomic
def details(request, computer_id):
rtx = {}
rtx['isUser'] = request.session['type'] == 'user'
rtx['computer'] = get_object_or_404(Computer, pk=computer_id)
rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=
computer_id).count()
rtx['sell'] = Sell.objects.filter(computer_id__computer_id=computer_id)
rtx['user_id'] = request.session['id']
rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id
).count()
rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id
=computer_id).order_by('-comment_date')
rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id
).order_by('-buy_time')[:5]
if rtx['isUser']:
rtx['mark'] = '收藏' if mark.objects.filter(user_id__user_id=rtx[
'user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏'
return render(request, 'Dashio/computer_detail.html', rtx)
@transaction.atomic
def post(request, user_id, computer_id):
if request.method == 'POST':
computer = Computer.objects.get(pk=computer_id)
user = User.objects.get(pk=user_id)
computer_comment(computer_id=computer, user_id=user, content=
request.POST['comment']).save()
return HttpResponseRedirect(reverse('shop:computerDetail', args=(
computer_id,)))
def makeMark(request, computer_id, user_id):
try:
m = mark.objects.get(computer_id__computer_id=computer_id,
user_id__user_id=user_id)
m.delete()
except ObjectDoesNotExist:
computer = get_object_or_404(Computer, pk=computer_id)
user = get_object_or_404(User, pk=user_id)
mark(computer_id=computer, user_id=user).save()
return HttpResponseRedirect(reverse('shop:computerDetail', args=(
computer_id,)))
<|reserved_special_token_1|>
from django.shortcuts import *
from shop.models import *
from django.db import transaction
from django.core.exceptions import *
@transaction.atomic
def computers(request):
ctx = {}
computer = Computer.objects.all()
ctx['brand'] = Brand.objects.all()
if request.method == 'POST':
if request.POST['computer_id'] != '':
computer = computer.filter(computer_id__icontains=request.POST[
'computer_id'])
if request.POST['cpu'] != '':
computer = computer.filter(cpu__icontains=request.POST['cpu'])
if request.POST['graphics_card'] != '':
computer = computer.filter(graphics_card__icontains=request.
POST['graphics_card'])
try:
if request.POST['minMemory'] != '':
computer = computer.filter(memory__gte=int(request.POST[
'minMemory']))
if request.POST['maxMemory'] != '':
computer = computer.exclude(memory__gte=int(request.POST[
'maxMemory']))
if request.POST['minssd'] != '':
computer = computer.filter(ssd_capacity__gte=int(request.
POST['minssd']))
if request.POST['maxssd'] != '':
computer = computer.exclude(ssd_capacity__gte=int(request.
POST['maxssd']))
if request.POST['minDisk'] != '':
computer = computer.filter(disk_capacity__gte=int(request.
POST['minDisk']))
if request.POST['maxDisk'] != '':
computer = computer.exclude(disk_capacity__gte=int(request.
POST['maxDisk']))
except ValueError:
return render(request, 'Dashio/error.html', {'error': '请输入整数'})
if request.POST.get('brand', '') != '':
print(request.POST['brand'])
computer = computer.filter(brand__name__icontains=request.POST[
'brand'])
if request.POST['sort'] != '':
sortKey = request.POST['sortType'] + request.POST['sort']
computer = computer.order_by(sortKey)
ctx['computer'] = computer
return render(request, 'Dashio/computers.html', ctx)
@transaction.atomic
def details(request, computer_id):
rtx = {}
rtx['isUser'] = request.session['type'] == 'user'
rtx['computer'] = get_object_or_404(Computer, pk=computer_id)
rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=
computer_id).count()
rtx['sell'] = Sell.objects.filter(computer_id__computer_id=computer_id)
rtx['user_id'] = request.session['id']
rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id
).count()
rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id
=computer_id).order_by('-comment_date')
rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id
).order_by('-buy_time')[:5]
if rtx['isUser']:
rtx['mark'] = '收藏' if mark.objects.filter(user_id__user_id=rtx[
'user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏'
return render(request, 'Dashio/computer_detail.html', rtx)
@transaction.atomic
def post(request, user_id, computer_id):
if request.method == 'POST':
computer = Computer.objects.get(pk=computer_id)
user = User.objects.get(pk=user_id)
computer_comment(computer_id=computer, user_id=user, content=
request.POST['comment']).save()
return HttpResponseRedirect(reverse('shop:computerDetail', args=(
computer_id,)))
def makeMark(request, computer_id, user_id):
try:
m = mark.objects.get(computer_id__computer_id=computer_id,
user_id__user_id=user_id)
m.delete()
except ObjectDoesNotExist:
computer = get_object_or_404(Computer, pk=computer_id)
user = get_object_or_404(User, pk=user_id)
mark(computer_id=computer, user_id=user).save()
return HttpResponseRedirect(reverse('shop:computerDetail', args=(
computer_id,)))
<|reserved_special_token_1|>
from django.shortcuts import *
from shop.models import *
from django.db import transaction
from django.core.exceptions import *
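# Search view: narrows the Computer queryset with the POSTed filters (id, cpu,
# graphics card, memory/ssd/disk ranges, brand) and applies an optional sort key.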
@transaction.atomic
def computers(request):
ctx = {}
computer = Computer.objects.all()
ctx['brand'] = Brand.objects.all()
if request.method == 'POST':
if request.POST['computer_id'] != '':
computer = computer.filter(computer_id__icontains=request.POST['computer_id'])
if request.POST['cpu'] != '':
computer = computer.filter(cpu__icontains=request.POST['cpu'])
if request.POST['graphics_card'] != '':
computer = computer.filter(graphics_card__icontains=request.POST['graphics_card'])
try:
if request.POST['minMemory'] != '':
computer = computer.filter(memory__gte=int(request.POST['minMemory']))
if request.POST['maxMemory'] != '':
computer = computer.exclude(memory__gte=int(request.POST['maxMemory']))
if request.POST['minssd'] != '':
computer = computer.filter(ssd_capacity__gte=int(request.POST['minssd']))
if request.POST['maxssd'] != '':
computer = computer.exclude(ssd_capacity__gte=int(request.POST['maxssd']))
if request.POST['minDisk'] != '':
computer = computer.filter(disk_capacity__gte=int(request.POST['minDisk']))
if request.POST['maxDisk'] != '':
computer = computer.exclude(disk_capacity__gte=int(request.POST['maxDisk']))
except ValueError:
return render(request, 'Dashio/error.html', {'error': "请输入整数"})
if request.POST.get('brand', '') != '':
print(request.POST['brand'])
computer = computer.filter(brand__name__icontains=request.POST['brand'])
if request.POST['sort'] != '':
sortKey = request.POST['sortType'] + request.POST['sort']
computer = computer.order_by(sortKey)
ctx['computer'] = computer
return render(request, "Dashio/computers.html", ctx)
@transaction.atomic
def details(request, computer_id):
rtx = {}
rtx['isUser'] = request.session['type'] == 'user'
rtx['computer'] = get_object_or_404(Computer, pk=computer_id)
rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=computer_id).count()
rtx['sell'] = Sell.objects.filter(computer_id__computer_id=computer_id)
rtx['user_id'] = request.session['id']
rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id).count()
rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id=computer_id).order_by('-comment_date')
rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id).order_by('-buy_time')[:5]
if rtx['isUser']:
rtx['mark'] = ('收藏' if mark.objects.filter(user_id__user_id=rtx['user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏')
return render(request, 'Dashio/computer_detail.html', rtx)
@transaction.atomic
def post(request, user_id, computer_id):
if request.method == 'POST':
computer = Computer.objects.get(pk=computer_id)
user = User.objects.get(pk=user_id)
computer_comment(computer_id=computer, user_id=user, content=request.POST['comment']).save()
return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))
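# Toggle a user's bookmark on a computer: delete the mark if it already exists, otherwise create it.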
def makeMark(request, computer_id, user_id):
try:
m = mark.objects.get(computer_id__computer_id=computer_id, user_id__user_id=user_id)
m.delete()
except ObjectDoesNotExist:
computer = get_object_or_404(Computer, pk=computer_id)
user = get_object_or_404(User, pk=user_id)
mark(computer_id=computer, user_id=user).save()
return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))
|
flexible
|
{
"blob_id": "18689741a33e6d17e694ee0619a1f36d8d178cbb",
"index": 3223,
"step-1": "<mask token>\n\n\[email protected]\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST[\n 'computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.\n POST['graphics_card'])\n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST[\n 'minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST[\n 'maxMemory']))\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.\n POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.\n POST['maxssd']))\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.\n POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.\n POST['maxDisk']))\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': '请输入整数'})\n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST[\n 'brand'])\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n ctx['computer'] = computer\n return render(request, 'Dashio/computers.html', ctx)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST[\n 'computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.\n POST['graphics_card'])\n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST[\n 'minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST[\n 'maxMemory']))\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.\n POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.\n POST['maxssd']))\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.\n POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.\n POST['maxDisk']))\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': '请输入整数'})\n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST[\n 'brand'])\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n ctx['computer'] = computer\n return render(request, 'Dashio/computers.html', ctx)\n\n\n<mask token>\n\n\[email protected]\ndef post(request, user_id, computer_id):\n if request.method == 'POST':\n computer = Computer.objects.get(pk=computer_id)\n user = User.objects.get(pk=user_id)\n computer_comment(computer_id=computer, user_id=user, content=\n request.POST['comment']).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n\n\ndef makeMark(request, computer_id, user_id):\n try:\n m = mark.objects.get(computer_id__computer_id=computer_id,\n user_id__user_id=user_id)\n m.delete()\n except ObjectDoesNotExist:\n computer = get_object_or_404(Computer, pk=computer_id)\n user = get_object_or_404(User, pk=user_id)\n mark(computer_id=computer, user_id=user).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n",
"step-3": "<mask token>\n\n\[email protected]\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST[\n 'computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.\n POST['graphics_card'])\n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST[\n 'minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST[\n 'maxMemory']))\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.\n POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.\n POST['maxssd']))\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.\n POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.\n POST['maxDisk']))\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': '请输入整数'})\n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST[\n 'brand'])\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n ctx['computer'] = computer\n return render(request, 'Dashio/computers.html', ctx)\n\n\[email protected]\ndef details(request, computer_id):\n rtx = {}\n rtx['isUser'] = request.session['type'] == 'user'\n rtx['computer'] = get_object_or_404(Computer, pk=computer_id)\n rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=\n computer_id).count()\n rtx['sell'] = Sell.objects.filter(computer_id__computer_id=computer_id)\n rtx['user_id'] = request.session['id']\n rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id\n ).count()\n rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id\n =computer_id).order_by('-comment_date')\n rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id\n ).order_by('-buy_time')[:5]\n if rtx['isUser']:\n rtx['mark'] = '收藏' if mark.objects.filter(user_id__user_id=rtx[\n 'user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏'\n return render(request, 'Dashio/computer_detail.html', rtx)\n\n\[email protected]\ndef post(request, user_id, computer_id):\n if request.method == 'POST':\n computer = Computer.objects.get(pk=computer_id)\n user = User.objects.get(pk=user_id)\n computer_comment(computer_id=computer, user_id=user, content=\n request.POST['comment']).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n\n\ndef makeMark(request, computer_id, user_id):\n try:\n m = mark.objects.get(computer_id__computer_id=computer_id,\n user_id__user_id=user_id)\n m.delete()\n except ObjectDoesNotExist:\n computer = get_object_or_404(Computer, pk=computer_id)\n user = get_object_or_404(User, pk=user_id)\n mark(computer_id=computer, user_id=user).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n",
"step-4": "from django.shortcuts import *\nfrom shop.models import *\nfrom django.db import transaction\nfrom django.core.exceptions import *\n\n\[email protected]\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST[\n 'computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.\n POST['graphics_card'])\n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST[\n 'minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST[\n 'maxMemory']))\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.\n POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.\n POST['maxssd']))\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.\n POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.\n POST['maxDisk']))\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': '请输入整数'})\n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST[\n 'brand'])\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n ctx['computer'] = computer\n return render(request, 'Dashio/computers.html', ctx)\n\n\[email protected]\ndef details(request, computer_id):\n rtx = {}\n rtx['isUser'] = request.session['type'] == 'user'\n rtx['computer'] = get_object_or_404(Computer, pk=computer_id)\n rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=\n computer_id).count()\n rtx['sell'] = Sell.objects.filter(computer_id__computer_id=computer_id)\n rtx['user_id'] = request.session['id']\n rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id\n ).count()\n rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id\n =computer_id).order_by('-comment_date')\n rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id\n ).order_by('-buy_time')[:5]\n if rtx['isUser']:\n rtx['mark'] = '收藏' if mark.objects.filter(user_id__user_id=rtx[\n 'user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏'\n return render(request, 'Dashio/computer_detail.html', rtx)\n\n\[email protected]\ndef post(request, user_id, computer_id):\n if request.method == 'POST':\n computer = Computer.objects.get(pk=computer_id)\n user = User.objects.get(pk=user_id)\n computer_comment(computer_id=computer, user_id=user, content=\n request.POST['comment']).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n\n\ndef makeMark(request, computer_id, user_id):\n try:\n m = mark.objects.get(computer_id__computer_id=computer_id,\n user_id__user_id=user_id)\n m.delete()\n except ObjectDoesNotExist:\n computer = get_object_or_404(Computer, pk=computer_id)\n user = get_object_or_404(User, pk=user_id)\n mark(computer_id=computer, user_id=user).save()\n return HttpResponseRedirect(reverse('shop:computerDetail', args=(\n computer_id,)))\n",
"step-5": "from django.shortcuts import *\nfrom shop.models import *\nfrom django.db import transaction\nfrom django.core.exceptions import *\n\[email protected]\ndef computers(request):\n ctx = {}\n computer = Computer.objects.all()\n ctx['brand'] = Brand.objects.all()\n\n if request.method == 'POST':\n if request.POST['computer_id'] != '':\n computer = computer.filter(computer_id__icontains=request.POST['computer_id'])\n if request.POST['cpu'] != '':\n computer = computer.filter(cpu__icontains=request.POST['cpu'])\n if request.POST['graphics_card'] != '':\n computer = computer.filter(graphics_card__icontains=request.POST['graphics_card'])\n \n try:\n if request.POST['minMemory'] != '':\n computer = computer.filter(memory__gte=int(request.POST['minMemory']))\n if request.POST['maxMemory'] != '':\n computer = computer.exclude(memory__gte=int(request.POST['maxMemory']))\n\n if request.POST['minssd'] != '':\n computer = computer.filter(ssd_capacity__gte=int(request.POST['minssd']))\n if request.POST['maxssd'] != '':\n computer = computer.exclude(ssd_capacity__gte=int(request.POST['maxssd']))\n\n if request.POST['minDisk'] != '':\n computer = computer.filter(disk_capacity__gte=int(request.POST['minDisk']))\n if request.POST['maxDisk'] != '':\n computer = computer.exclude(disk_capacity__gte=int(request.POST['maxDisk']))\n\n except ValueError:\n return render(request, 'Dashio/error.html', {'error': \"请输入整数\"})\n \n if request.POST.get('brand', '') != '':\n print(request.POST['brand'])\n computer = computer.filter(brand__name__icontains=request.POST['brand'])\n\n if request.POST['sort'] != '':\n sortKey = request.POST['sortType'] + request.POST['sort']\n computer = computer.order_by(sortKey)\n\n ctx['computer'] = computer\n return render(request, \"Dashio/computers.html\", ctx)\n\[email protected]\ndef details(request, computer_id):\n rtx = {}\n rtx['isUser'] = request.session['type'] == 'user'\n rtx['computer'] = get_object_or_404(Computer, pk=computer_id)\n rtx['markAmount'] = mark.objects.filter(computer_id__computer_id=computer_id).count()\n rtx['sell'] = Sell.objects.filter(computer_id__computer_id=computer_id)\n rtx['user_id'] = request.session['id']\n rtx['sellAmount'] = Buy.objects.filter(computer_id__computer_id=computer_id).count()\n rtx['comments'] = computer_comment.objects.filter(computer_id__computer_id=computer_id).order_by('-comment_date')\n rtx['buys'] = Buy.objects.filter(computer_id__computer_id=computer_id).order_by('-buy_time')[:5]\n \n if rtx['isUser']:\n rtx['mark'] = ('收藏' if mark.objects.filter(user_id__user_id=rtx['user_id'], computer_id=rtx['computer']).count() == 0 else '取消收藏')\n\n return render(request, 'Dashio/computer_detail.html', rtx)\n\[email protected]\ndef post(request, user_id, computer_id):\n if request.method == 'POST':\n computer = Computer.objects.get(pk=computer_id)\n user = User.objects.get(pk=user_id)\n computer_comment(computer_id=computer, user_id=user, content=request.POST['comment']).save()\n \n return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))\n\ndef makeMark(request, computer_id, user_id):\n try:\n m = mark.objects.get(computer_id__computer_id=computer_id, user_id__user_id=user_id)\n m.delete()\n except ObjectDoesNotExist:\n computer = get_object_or_404(Computer, pk=computer_id)\n user = get_object_or_404(User, pk=user_id)\n mark(computer_id=computer, user_id=user).save()\n \n return HttpResponseRedirect(reverse('shop:computerDetail', args=(computer_id, )))",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while t:
t -= 1
y = []
z = []
x = str(input())
for i in range(len(x)):
if not int(i) % 2:
y.append(x[i])
else:
z.append(x[i])
print(''.join(y) + ' ' + ''.join(z))
<|reserved_special_token_1|>
t = eval(input())
while t:
t -= 1
y = []
z = []
x = str(input())
for i in range(len(x)):
if not int(i) % 2:
y.append(x[i])
else:
z.append(x[i])
print(''.join(y) + ' ' + ''.join(z))
<|reserved_special_token_1|>
t = eval(input())
while t:
t -= 1
y = []
z = []
x = str(input())
for i in range(len(x)):
if (not int(i)%2):
y.append(x[i])
else:
z.append(x[i])
print("".join(y) + " " + "".join(z))
|
flexible
|
{
"blob_id": "ac32fb5fcd71790f9dbf0794992a9dc92a202c9b",
"index": 7972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile t:\n t -= 1\n y = []\n z = []\n x = str(input())\n for i in range(len(x)):\n if not int(i) % 2:\n y.append(x[i])\n else:\n z.append(x[i])\n print(''.join(y) + ' ' + ''.join(z))\n",
"step-3": "t = eval(input())\nwhile t:\n t -= 1\n y = []\n z = []\n x = str(input())\n for i in range(len(x)):\n if not int(i) % 2:\n y.append(x[i])\n else:\n z.append(x[i])\n print(''.join(y) + ' ' + ''.join(z))\n",
"step-4": "t = eval(input())\nwhile t:\n t -= 1\n y = []\n z = []\n x = str(input())\n for i in range(len(x)):\n if (not int(i)%2):\n y.append(x[i])\n else:\n z.append(x[i])\n print(\"\".join(y) + \" \" + \"\".join(z))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!usr/bin/env python
#-*- coding:utf-8 -*-
# this model is for decision tree
# objective: To cluster different services
# JialongLi 2017/03/18
import re
import os
import sys
import pickle
import copy
import random
import pydotplus
USER_NUM = 1000
reload(sys)
sys.setdefaultencoding( "utf-8" )
from sklearn import tree
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans
# 0 represent Sunday, 1: Monday, 6: Saturday, 0: Sunday
day_index = {'0507': 1, '0508': 2, '0509': 3, '0510': 4, '0511': 5, '0512': 6, '0513': 0,
'0604': 1, '0605': 2, '0606': 3, '0607': 4, '0608': 5, '0609': 6, '0610': 0,
'0702': 1, '0703': 2, '0704': 3, '0705': 4, '0706': 5, '0707': 6, '0708': 0,
'0806': 1, '0807': 2, '0808': 3, '0809': 4, '0810': 5, '0811': 6, '0812': 0}
service_type = ['I', 'F', 'W', 'G', 'S', 'V']
# get activity_dict
# user's activity: default value is 'F'
# format: {id_1:{'0507': [24/PERIOD], '0508': ['I', 'W', 'G']}, id_2}
def get_activity_dict(activity_dict_path):
pkl_file = open(activity_dict_path, 'rb')
activity_dict = pickle.load(pkl_file)
pkl_file.close()
return activity_dict
# data are divided into train data and test data
# first three weeks: train data; last week: test data
# train_dict and test_dict are subset of activity_dict, id format is different
# activity_dict format: {real id_1:{'0507': [24/PERIOD], '0508': ['I', 'W', 'G']}, id_2}
# user_id_index: key = number, value = real id
def data_segement(activity_dict, train_dict_path, test_dict_path, user_id_index_path):
train_dict = {}
test_dict = {}
user_count = 0
user_id_index = {}
for key_0, value_0 in activity_dict.items(): # key_0: real user_id
train_dict[user_count] = {}
test_dict[user_count] = {}
user_id_index[user_count] = key_0
for key, value in value_0.items():
if key[1] == '8': # data of August, test set
test_dict[user_count][key] = value
else:
train_dict[user_count][key] = value # train set
user_count += 1
output_1 = open(train_dict_path, 'wb')
pickle.dump(train_dict, output_1)
output_2 = open(test_dict_path, 'wb')
pickle.dump(test_dict, output_2)
output_3 = open(user_id_index_path, 'wb')
pickle.dump(user_id_index, output_3)
output_1.close()
output_2.close()
output_3.close()
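
# Illustrative example of the split above (toy data, not from the real corpus):
#   activity_dict = {'u42': {'0507': [...], '0806': [...]}}
# becomes train_dict = {0: {'0507': [...]}}, test_dict = {0: {'0806': [...]}} and
# user_id_index = {0: 'u42'}, since keys whose second character is '8' are August (test) days.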
# get train data and test data
# train_dict, test_dict format: {number id_1:{'0507': [24/PERIOD], '0508': ['I', 'W', 'G']}, id_2}
def get_data(train_dict_path, test_dict_path, user_id_index_path):
pkl_file_1 = open(train_dict_path, 'rb')
pkl_file_2 = open(test_dict_path, 'rb')
pkl_file_3 = open(user_id_index_path, 'rb')
train_dict = pickle.load(pkl_file_1)
test_dict = pickle.load(pkl_file_2)
user_id_index = pickle.load(pkl_file_3)
pkl_file_1.close()
pkl_file_2.close()
pkl_file_3.close()
return train_dict, test_dict, user_id_index
# get profile
def get_profile(profile_path):
pkl_file = open(profile_path, 'rb')
profile = pickle.load(pkl_file)
return profile
# select different features
# feature format: [user_id, gender, age, edu, job, hour, date], 7 features
# profile: dict, {real user_id: [gender, age, edu, job]}
# feature format: double list, outer list element is a sample: [number user_id, gender, age, edu, job, hour, date]
# category format: list, element is service type, length = feature
def feature_select(data_dict, profile, user_id_index, is_over_sampling):
feature = []
category = []
over_sampling_num = 0
for user_id, all_dates in data_dict.items():
real_user_id = user_id_index[user_id]
one_user_profile = copy.deepcopy(profile[real_user_id]) # gender, age, edu, job
one_user_profile.insert(0, user_id) # insert user_id
for date, activity in all_dates.items():
for i in range(len(activity)):
if 1: #activity[i] != 'F': # do not add 'F'
sample = copy.deepcopy(one_user_profile)
#del(sample[1:4])
sample.append(i) #(int(i/6)) # i represents hour
sample.append(day_index[date]) # day_index: 7 days in one week
feature.append(sample)
#category.append(activity[i])
if activity[i] == 'F':
category.append('F')
else:
category.append('O')
if is_over_sampling and len(sample) > 5: # make sure that features are completed
if activity[i] != 'F':
sample_over = [[] for k in range(over_sampling_num)]
for j in range(over_sampling_num):
sample_over[j] = copy.deepcopy(sample)
sample_over[j][-3] = random.randint(0, 8) # random disturbance in job feature
feature.append(sample_over[j])
category.append('O')
return feature, category
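
# Illustrative feature row (toy values, for clarity only): with profile[real_id] =
# [gender, age, edu, job] = [1, 3, 2, 5], user number 12, hour i = 9 and day_index[date] = 3,
# the appended sample is [12, 1, 3, 2, 5, 9, 3] and its label is 'F' when the slot is idle,
# otherwise 'O'.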
# build features, all features
# False means test data do not need over sampling
def feature_build(train_dict, test_dict, profile, user_id_index):
feature_train, category_train = feature_select(train_dict, profile, user_id_index, True)
feature_test, category_test = feature_select(test_dict, profile, user_id_index, False)
return feature_train, feature_test, category_train, category_test
# calculating the hit rate
def cal_hit_rate(category_predict, category_test):
hit_count = 0
sample_test_count = len(category_predict)
for i in range(sample_test_count):
if category_predict[i] == category_test[i]:
hit_count += 1
hit_rate = float(hit_count) / float(sample_test_count)
print 'hit rate: ' + str(round(hit_rate, 4) * 100) + '%'
# calculating F value
def calculating_F_value(category_predict, category_test):
n_predict = 0
n_origin = 0
hit_count = 0
for item in category_predict:
if item != 'F':
n_predict += 1
for item in category_test:
if item != 'F':
n_origin += 1
for i in range(len(category_predict)):
if category_predict[i] != 'F' and category_predict[i] == category_test[i]:
hit_count += 1
precision = float(hit_count) / float(n_predict)
recall = float(hit_count) / float(n_origin)
F_value = 2 * precision * recall / (precision + recall)
print 'n_predict: ' + str(n_predict)
print 'n_origin: ' + str(n_origin)
print 'precision: ' + str(round(precision, 3))
print 'recall: ' + str(round(recall, 3))
print 'F_value: ' + str(round(F_value, 3))
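
# Worked example of the metric above (hypothetical counts): with hit_count = 30,
# n_predict = 50 and n_origin = 60, precision = 30/50 = 0.6, recall = 30/60 = 0.5 and
# F_value = 2 * 0.6 * 0.5 / (0.6 + 0.5) = 0.545.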
# 1. select the service type used most in that period in past days
# 2. if the user did not use any service in that period before, select the service type used most in past days
# 3. if the user did not use any service before, select a service at random
# service_count_hour: key = (user_id, hour, service_type) value = count
# service_count_past: key = (user_id, service_type) value = count
# service_hour: key = (user_id, hour), value = [service_type, count]
# service_past: key = user_id, value = [service_type, count]
def conventional_method_Mused(feature_train, feature_test, category_train):
if len(feature_train[0]) != 7:
print 'feature wrong'
service_count_hour = {}
service_count_past = {}
for i in range(len(feature_train)):
key_hour = (feature_train[i][0], feature_train[i][5], category_train[i])
if key_hour not in service_count_hour:
service_count_hour[key_hour] = 1
else:
service_count_hour[key_hour] += 1
key_past = (feature_train[i][0], category_train[i])
if key_past not in service_count_past:
service_count_past[key_past] = 1
else:
service_count_past[key_past] += 1
service_hour = {}
service_past = {}
for key, value in service_count_hour.items():
key_hour = (key[0], key[1])
if key_hour not in service_hour:
service_hour[key_hour] = [key[2], value]
else:
if value > service_hour[key_hour][1]:
service_hour[key_hour] = [key[2], value]
else:
pass
for key, value in service_count_past.items():
key_past = key[0]
if key_past not in service_past:
service_past[key_past] = [key[1], value]
else:
if value > service_past[key_past][1]:
service_past[key_past] = [key[1], value]
else:
pass
category_predict = []
for i in range(len(feature_test)):
key_0 = (feature_test[i][0], feature_test[i][5])
key_1 = feature_test[i][0]
if key_0 in service_hour:
value_0 = service_hour[key_0]
category_predict.append(value_0[0])
elif key_1 in service_past:
value_1 = service_past[key_1]
category_predict.append(value_1[0])
else:
random_num = random.randint(0, len(service_type)-1)
category_predict.append(service_type[random_num])
return category_predict
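
# Illustrative lookup tables for the rules above (toy values, not real data):
#   service_count_hour = {(0, 9, 'I'): 3, (0, 9, 'F'): 1}  ->  service_hour = {(0, 9): ['I', 3]}
#   service_count_past = {(0, 'I'): 4, (0, 'F'): 20}       ->  service_past = {0: ['F', 20]}
# so user 0 at hour 9 is predicted 'I'; at an hour never seen before the fallback is the
# overall most-used type 'F'; a user with no history at all gets a random entry of service_type.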
# method 2: service in last week
def conventional_method_Lweek(feature_train, feature_test, category_train):
if len(feature_train[0]) != 7:
print 'feature wrong'
category_predict = ['FFF' for i in range(len(feature_test))]
for i in range(len(feature_train)):
sample = feature_train[i]
user_id = sample[0]
hour = sample[-2]
date = sample[-1]
if date == 0: # 0 means it is Sunday and should be the last
date = 7
else:
pass
service_position = user_id * 168 + (date - 1) * 24 + hour
category_predict[service_position] = category_train[i]
return category_predict
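
# Worked example of the index formula above (illustrative values): user_id = 2 on a
# Wednesday (date = 3) at hour 15 maps to slot 2 * 168 + (3 - 1) * 24 + 15 = 399.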
# decision tree
def decision_tree(feature_train, feature_test, category_train):
clf = tree.DecisionTreeClassifier()
clf = clf.fit(feature_train, category_train)
category_predict = clf.predict(feature_test) # the format of category_predict is weird
category_Dtree = []
for item in category_predict:
if item == 'F':
category_Dtree.append('F')
else:
category_Dtree.append('O')
return category_Dtree
# random forests
def random_forests(feature_train, feature_test, category_train):
clf = RandomForestClassifier(n_estimators = 80)
clf = clf.fit(feature_train, category_train)
category_predict = clf.predict(feature_test)
category_RF = []
for item in category_predict:
if item == 'F':
category_RF.append('F')
else:
category_RF.append('O')
return category_RF
# save user_activity as pkl file for migration.py
def user_activity_save(user_activity, user_activity_path):
output = open(user_activity_path, 'wb')
pickle.dump(user_activity, output)
output.close()
# user_activity is for migration.py
# key = user_id, range(1000), value = ['F', 'G'...], length is 7 * 24 = 168
def activity_restore(feature, category):
if len(feature[0]) != 7:
print 'feature wrong'
user_activity = {}
for i in range(USER_NUM):
user_activity[i] = ['FFF' for j in range(168)]
for i in range(len(feature)):
sample = feature[i]
user_id = sample[0]
hour = sample[5]
date = sample[-1]
if date == 0: # 0 means it is Sunday and should be the last
date = 7
else:
pass
position = (date - 1) * 24 + hour
user_activity[user_id][position] = category[i]
return user_activity
def counting_accuate_rate(category_Dtree, category_test):
on_on = 0
on_off = 0
off_on = 0
off_off = 0
print len(category_test)
print len(category_Dtree)
for i in range(21504): #(len(category_Dtree)):
if category_Dtree[i] == 'O' and category_test[i] == 'O':
on_on += 1
elif category_Dtree[i] == 'O' and category_test[i] == 'F':
on_off += 1
elif category_Dtree[i] == 'F' and category_test[i] == 'O':
off_on += 1
else:
off_off += 1
print 'on_on' + '\t' + str(on_on)
print 'on_off' + '\t' + str(on_off)
print 'off_on' + '\t' + str(off_on)
print 'off_off' + '\t' + str(off_off)
# save file for sleep.py
def save_file_for_sleep(category_predict, category_test):
category_predict_path = '../data/category_predict_Dtree.pkl'
category_test_path = '../data/category_test.pkl'
output_1 = open(category_predict_path, 'wb')
pickle.dump(category_predict, output_1)
output_2 = open(category_test_path, 'wb')
pickle.dump(category_test, output_2)
output_1.close()
output_2.close()
if __name__ == '__main__':
'''
activity_dict_path = '../data/activity_dict.pkl'
activity_dict = get_activity_dict(activity_dict_path)
train_dict_path = '../data/train_dict.pkl'
test_dict_path = '../data/test_dict.pkl'
user_id_index_path = '../data/user_id_index.pkl'
data_segement(activity_dict, train_dict_path, test_dict_path, user_id_index_path)
'''
train_dict_path = '../data/train_dict.pkl'
test_dict_path = '../data/test_dict.pkl'
user_id_index_path = '../data/user_id_index.pkl'
train_dict, test_dict, user_id_index = get_data(train_dict_path, test_dict_path, user_id_index_path)
profile_path = '../data/profile.pkl'
profile = get_profile(profile_path)
feature_train, feature_test, category_train, category_test = feature_build(train_dict, test_dict, profile, user_id_index)
print 'feature_train sample: ' + str(feature_train[1000])
print 'feature_test sample: ' + str(feature_test[1000])
# decision tree
category_Dtree = decision_tree(feature_train, feature_test, category_train)
# random_forests
#category_RF = random_forests(feature_train, feature_test, category_train)
# conventional method: most-used service
#category_Mused = conventional_method_Mused(feature_train, feature_test, category_train)
# conventional method: last-week service
#category_Lweek = conventional_method_Lweek(feature_train, feature_test, category_train)
#cal_hit_rate(category_Dtree, category_test)
#calculating_F_value(category_Dtree, category_test)
#counting_accuate_rate(category_Dtree, category_test)
#save_file_for_sleep(category_Dtree, category_test)
# this part is for migration.py
'''
# origin data, user_activity_origin is users' real behavior
user_activity_origin_path = '../data/user_activity_test/user_activity_origin.pkl'
user_activity_origin = activity_restore(feature_test, category_test)
user_activity_save(user_activity_origin, user_activity_origin_path)
'''
'''
	# prediction data using decision_tree
user_activity_Dtree_path = '../data/user_activity_test/user_activity_Dtree.pkl'
user_activity_Dtree = activity_restore(feature_test, category_Dtree)
user_activity_save(user_activity_Dtree, user_activity_Dtree_path)
'''
'''
	# prediction data according to users' most-used service
user_activity_Mused_path = '../data/user_activity_test/user_activity_Mused.pkl'
user_activity_Mused = activity_restore(feature_test, category_Mused)
user_activity_save(user_activity_Mused, user_activity_Mused_path)
'''
'''
	# prediction data according to users' last-week service
user_activity_Lweek_path = '../data/user_activity_test/user_activity_Lweek.pkl'
user_activity_Lweek = activity_restore(feature_test, category_Lweek)
user_activity_save(user_activity_Lweek, user_activity_Lweek_path)
'''
|
normal
|
{
"blob_id": "65c0d940bacc2d016121812c435cc60f3fc1ba90",
"index": 7233,
"step-1": "#!usr/bin/env python\r\n#-*- coding:utf-8 -*-\r\n\r\n# this model is for decision tree\r\n# objective: To cluster different service\r\n# JialongLi 2017/03/18\r\n\r\nimport re\r\nimport os\r\nimport sys\r\nimport pickle\r\nimport copy\r\nimport random\r\nimport pydotplus\r\n\r\n\r\nUSER_NUM = 1000\r\nreload(sys)\r\nsys.setdefaultencoding( \"utf-8\" )\r\nfrom sklearn import tree\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.cluster import KMeans\r\n\r\n# 0 represent Sunday, 1: Monday, 6: Saturday, 0: Sunday\r\nday_index = {'0507': 1, '0508': 2, '0509': 3, '0510': 4, '0511': 5, '0512': 6, '0513': 0, \r\n\t\t\t '0604': 1, '0605': 2, '0606': 3, '0607': 4, '0608': 5, '0609': 6, '0610': 0, \r\n\t\t\t '0702': 1, '0703': 2, '0704': 3, '0705': 4, '0706': 5, '0707': 6, '0708': 0, \r\n\t\t\t '0806': 1, '0807': 2, '0808': 3, '0809': 4, '0810': 5, '0811': 6, '0812': 0}\r\n\r\nservice_type = ['I', 'F', 'W', 'G', 'S', 'V']\r\n\r\n# get activity_dict\r\n# user's activity: default value is 'F'\r\n# format: {id_1:{'0507': [24/PERIOD], '0508': ['I', 'W', 'G']}, id_2}\r\ndef get_activity_dict(activity_dict_path):\r\n\tpkl_file = open(activity_dict_path, 'rb')\r\n\tactivity_dict = pickle.load(pkl_file)\r\n\tpkl_file.close()\r\n\treturn activity_dict\r\n\r\n# data are divided into train data and test data\r\n# first three weeks: train data; last week: test data\r\n# train_dict and test_dict are subset of activity_dict, id format is different\r\n# activity_dict format: {real id_1:{'0507': [24/PERIOD], '0508': ['I', 'W', 'G']}, id_2}\r\n# user_id_index: key = number, value = real id\r\ndef data_segement(activity_dict, train_dict_path, test_dict_path, user_id_index_path):\r\n\ttrain_dict = {}\r\n\ttest_dict = {}\r\n\tuser_count = 0\r\n\tuser_id_index = {}\r\n\tfor key_0, value_0 in activity_dict.items(): # key_0: real user_id\r\n\t\ttrain_dict[user_count] = {}\r\n\t\ttest_dict[user_count] = {}\r\n\t\tuser_id_index[user_count] = key_0\r\n\t\tfor key, value in value_0.items():\r\n\t\t\tif key[1] == '8': # data of August, test set\r\n\t\t\t\ttest_dict[user_count][key] = value\r\n\t\t\telse:\r\n\t\t\t\ttrain_dict[user_count][key] = value # train set\r\n\t\tuser_count += 1\r\n\r\n\toutput_1 = open(train_dict_path, 'wb')\r\n\tpickle.dump(train_dict, output_1)\r\n\toutput_2 = open(test_dict_path, 'wb')\r\n\tpickle.dump(test_dict, output_2)\r\n\toutput_3 = open(user_id_index_path, 'wb')\r\n\tpickle.dump(user_id_index, output_3)\r\n\toutput_1.close()\r\n\toutput_2.close()\r\n\toutput_3.close()\r\n\r\n# get train data and test data\r\n# train_dict, test_dict format: {number id_1:{'0507': [24/PERIOD], '0508': ['I', 'W', 'G']}, id_2}\r\ndef get_data(train_dict_path, test_dict_path, user_id_index_path):\r\n\tpkl_file_1 = open(train_dict_path, 'rb')\r\n\tpkl_file_2 = open(test_dict_path, 'rb')\r\n\tpkl_file_3 = open(user_id_index_path, 'rb')\r\n\ttrain_dict = pickle.load(pkl_file_1)\r\n\ttest_dict = pickle.load(pkl_file_2)\r\n\tuser_id_index = pickle.load(pkl_file_3)\r\n\tpkl_file_1.close()\r\n\tpkl_file_2.close()\r\n\tpkl_file_3.close()\r\n\treturn train_dict, test_dict, user_id_index\r\n\r\n# get profile\r\ndef get_profile(profile_path):\r\n\tpkl_file = open(profile_path, 'rb')\r\n\tprofile = pickle.load(pkl_file)\r\n\treturn profile\r\n\r\n# select different features\r\n# feature format: [user_id, gender, age, edu, job, hour, date], 7 features\r\n# profile: dict, {real user_id: [gender, 
age, edu, job]}\r\n# feature format: double list, outer list element is a sample: [number user_id, gender, age, edu, job, hour, date]\r\n# category format: list, element is service type, length = feature\r\ndef feature_select(data_dict, profile, user_id_index, is_over_sampling):\r\n\tfeature = []\r\n\tcategory = []\r\n\tover_sampling_num = 0\r\n\tfor user_id, all_dates in data_dict.items():\r\n\t\treal_user_id = user_id_index[user_id]\r\n\t\tone_user_profile = copy.deepcopy(profile[real_user_id]) # gender, age, edu, job\r\n\t\tone_user_profile.insert(0, user_id) # insert user_id\r\n\t\tfor date, activity in all_dates.items():\r\n\t\t\tfor i in range(len(activity)):\r\n\t\t\t\tif 1: #activity[i] != 'F': # do not add 'F'\r\n\t\t\t\t\tsample = copy.deepcopy(one_user_profile)\r\n\t\t\t\t\t#del(sample[1:4])\r\n\t\t\t\t\tsample.append(i) #(int(i/6)) # i represents hour\r\n\t\t\t\t\tsample.append(day_index[date]) # day_index: 7 days in one week\r\n\t\t\t\t\tfeature.append(sample)\r\n\t\t\t\t\t#category.append(activity[i])\r\n\t\t\t\t\tif activity[i] == 'F':\r\n\t\t\t\t\t\tcategory.append('F')\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tcategory.append('O')\r\n\t\t\t\t\tif is_over_sampling and len(sample) > 5: # make sure that features are completed\r\n\t\t\t\t\t\tif activity[i] != 'F':\r\n\t\t\t\t\t\t\tsample_over = [[] for k in range(over_sampling_num)]\r\n\t\t\t\t\t\t\tfor j in range(over_sampling_num):\r\n\t\t\t\t\t\t\t\tsample_over[j] = copy.deepcopy(sample)\r\n\t\t\t\t\t\t\t\tsample_over[j][-3] = random.randint(0, 8) # random disturbance in job feature\r\n\t\t\t\t\t\t\t\tfeature.append(sample_over[j])\r\n\t\t\t\t\t\t\t\tcategory.append('O')\r\n\treturn feature, category\r\n\r\n# build features, all features\r\n# False means test data do not need over sampling\r\ndef feature_build(train_dict, test_dict, profile, user_id_index):\r\n\tfeature_train, category_train = feature_select(train_dict, profile, user_id_index, True)\r\n\tfeature_test, category_test = feature_select(test_dict, profile, user_id_index, False)\r\n\treturn feature_train, feature_test, category_train, category_test\r\n\r\n# calculating the hit rate\r\ndef cal_hit_rate(category_predict, category_test):\r\n\thit_count = 0\r\n\tsample_test_count = len(category_predict)\r\n\tfor i in range(sample_test_count):\r\n\t\tif category_predict[i] == category_test[i]:\r\n\t\t\thit_count += 1\r\n\thit_rate = float(hit_count) / float(sample_test_count)\r\n\tprint 'hit rate: ' + str(round(hit_rate, 4) * 100) + '%'\r\n\r\n# calculating F value\r\ndef calculating_F_value(category_predict, category_test):\r\n\tn_predict = 0\r\n\tn_origin = 0\r\n\thit_count = 0\r\n\tfor item in category_predict:\r\n\t\tif item != 'F':\r\n\t\t\tn_predict += 1\r\n\tfor item in category_test:\r\n\t\tif item != 'F':\r\n\t\t\tn_origin += 1\r\n\tfor i in range(len(category_predict)):\r\n\t\tif category_predict[i] != 'F' and category_predict[i] == category_test[i]:\r\n\t\t\thit_count += 1\r\n\tprecision = float(hit_count) / float(n_predict)\r\n\trecall = float(hit_count) / float(n_origin)\r\n\tF_value = 2 * precision * recall / (precision + recall)\r\n\tprint 'n_predict: ' + str(n_predict)\r\n\tprint 'n_origin: ' + str(n_origin)\r\n\tprint 'precision: ' + str(round(precision, 3))\r\n\tprint 'recall: ' + str(round(recall, 3))\r\n\tprint 'F_value: ' + str(round(F_value, 3))\r\n\r\n# 1. select the service type using most in that period in past days\r\n# 2. if user did not use service in that period before, select the service type using most in past days\r\n# 3. 
if user did not use service before, select service randomly \r\n# service_count_hour: key = (user_id, hour, service_type) value = count\r\n# service_count_past: key = (user_id, service_type) value = count\r\n# service_hour: key = (user_id, hour), value = [service_type, count]\r\n# service_past: key = user_id, value = [service_type, count]\r\ndef conventional_method_Mused(feature_train, feature_test, category_train):\r\n\tif len(feature_train[0]) != 7:\r\n\t\tprint 'feature wrong'\r\n\tservice_count_hour = {}\r\n\tservice_count_past = {}\r\n\tfor i in range(len(feature_train)):\r\n\t\tkey_hour = (feature_train[i][0], feature_train[i][5], category_train[i])\r\n\t\tif key_hour not in service_count_hour:\r\n\t\t\tservice_count_hour[key_hour] = 1\r\n\t\telse:\r\n\t\t\tservice_count_hour[key_hour] += 1\r\n\r\n\t\tkey_past = (feature_train[i][0], category_train[i])\r\n\t\tif key_past not in service_count_past:\r\n\t\t\tservice_count_past[key_past] = 1\r\n\t\telse:\r\n\t\t\tservice_count_past[key_past] += 1\r\n\r\n\tservice_hour = {}\r\n\tservice_past = {}\r\n\tfor key, value in service_count_hour.items():\r\n\t\tkey_hour = (key[0], key[1])\r\n\t\tif key_hour not in service_hour:\r\n\t\t\tservice_hour[key_hour] = [key[2], value]\r\n\t\telse:\r\n\t\t\tif value > service_hour[key_hour][1]:\r\n\t\t\t\tservice_hour[key_hour] = [key[2], value]\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\r\n\tfor key, value in service_count_past.items():\r\n\t\tkey_past = key[0]\r\n\t\tif key_past not in service_past:\r\n\t\t\tservice_past[key_past] = [key[1], value]\r\n\t\telse:\r\n\t\t\tif value > service_past[key_past][1]:\r\n\t\t\t\tservice_past[key_past] = [key[1], value]\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\r\n\tcategory_predict = []\r\n\tfor i in range(len(feature_test)):\r\n\t\tkey_0 = (feature_test[i][0], feature_test[i][5])\r\n\t\tkey_1 = feature_test[i][0]\r\n\t\tif key_0 in service_hour:\r\n\t\t\tvalue_0 = service_hour[key_0]\r\n\t\t\tcategory_predict.append(value_0[0])\r\n\t\telif key_1 in service_past:\r\n\t\t\tvalue_1 = service_past[key_1]\r\n\t\t\tcategory_predict.append(value_1[0])\r\n\t\telse:\r\n\t\t\trandom_num = random.randint(0, len(service_type)-1)\r\n\t\t\tcategory_predict.append(service_type[random_num])\r\n\r\n\treturn category_predict\r\n# method 2: service in last week\r\ndef conventional_method_Lweek(feature_train, feature_test, category_train):\r\n\tif len(feature_train[0]) != 7:\r\n\t\tprint 'feature wrong'\r\n\tcategory_predict = ['FFF' for i in range(len(feature_test))]\r\n\tfor i in range(len(feature_train)):\r\n\t\tsample = feature_train[i]\r\n\t\tuser_id = sample[0]\r\n\t\thour = sample[-2]\r\n\t\tdate = sample[-1]\r\n\t\tif date == 0: # 0 means it is Sunday and should be the last\r\n\t\t\tdate = 7\r\n\t\telse:\r\n\t\t\tpass\r\n\t\tservice_position = user_id * 168 + (date - 1) * 24 + hour\r\n\t\tcategory_predict[service_position] = category_train[i]\r\n\treturn category_predict\r\n\r\n# decision tree\r\ndef decision_tree(feature_train, feature_test, category_train):\r\n\tclf = tree.DecisionTreeClassifier()\r\n\tclf = clf.fit(feature_train, category_train)\r\n\tcategory_predict = clf.predict(feature_test) # the format of category_predict is weird\r\n\tcategory_Dtree = []\r\n\tfor item in category_predict:\r\n\t\tif item == 'F':\r\n\t\t\tcategory_Dtree.append('F')\r\n\t\telse:\r\n\t\t\tcategory_Dtree.append('O')\r\n\treturn category_Dtree \r\n\r\n# random forests\r\ndef random_forests(feature_train, feature_test, category_train):\r\n\tclf = RandomForestClassifier(n_estimators = 80)\r\n\tclf = 
clf.fit(feature_train, category_train)\r\n\tcategory_predict = clf.predict(feature_test)\r\n\tcategory_RF = []\r\n\tfor item in category_predict:\r\n\t\tif item == 'F':\r\n\t\t\tcategory_RF.append('F')\r\n\t\telse:\r\n\t\t\tcategory_RF.append('O')\r\n\treturn category_RF\r\n\r\n# save user_activity as pkl file for migration.py\r\ndef user_activity_save(user_activity, user_activity_path):\r\n\toutput = open(user_activity_path, 'wb')\r\n\tpickle.dump(user_activity, output)\r\n\toutput.close()\r\n\r\n# user_activity is for migration.py\r\n# key = user_id, range(1000), value = ['F', 'G'...], length is 7 * 24 = 168\r\ndef activity_restore(feature, category):\r\n\tif len(feature[0]) != 7:\r\n\t\tprint 'feature wrong'\r\n\tuser_activity = {}\r\n\tfor i in range(USER_NUM):\r\n\t\tuser_activity[i] = ['FFF' for j in range(168)]\r\n\tfor i in range(len(feature)):\r\n\t\tsample = feature[i]\r\n\t\tuser_id = sample[0]\r\n\t\thour = sample[5]\r\n\t\tdate = sample[-1]\r\n\t\tif date == 0: # 0 means it is Sunday and should be the last\r\n\t\t\tdate = 7\r\n\t\telse:\r\n\t\t\tpass\r\n\t\tposition = (date - 1) * 24 + hour\r\n\t\tuser_activity[user_id][position] = category[i]\r\n\treturn user_activity\r\n\r\ndef counting_accuate_rate(category_Dtree, category_test):\r\n\ton_on = 0\r\n\ton_off = 0\r\n\toff_on = 0\r\n\toff_off = 0\r\n\tprint len(category_test)\r\n\tprint len(category_Dtree)\r\n\tfor i in range(21504): #(len(category_Dtree)):\r\n\t\tif category_Dtree[i] == 'O' and category_test[i] == 'O':\r\n\t\t\ton_on += 1\r\n\t\telif category_Dtree[i] == 'O' and category_test[i] == 'F':\r\n\t\t\ton_off += 1\r\n\t\telif category_Dtree[i] == 'F' and category_test[i] == 'O':\r\n\t\t\toff_on += 1\r\n\t\telse:\r\n\t\t\toff_off += 1\r\n\tprint 'on_on' + '\\t' + str(on_on)\r\n\tprint 'on_off' + '\\t' + str(on_off)\r\n\tprint 'off_on' + '\\t' + str(off_on)\r\n\tprint 'off_off' + '\\t' + str(off_off)\r\n\r\n# save file for sleep.py\r\ndef save_file_for_sleep(category_predict, category_test):\r\n\tcategory_predict_path = '../data/category_predict_Dtree.pkl'\r\n\tcategory_test_path = '../data/category_test.pkl'\r\n\toutput_1 = open(category_predict_path, 'wb')\r\n\tpickle.dump(category_predict, output_1)\r\n\toutput_2 = open(category_test_path, 'wb')\r\n\tpickle.dump(category_test, output_2)\r\n\toutput_1.close()\r\n\toutput_2.close()\r\n\r\nif __name__ == '__main__':\r\n\t'''\r\n\tactivity_dict_path = '../data/activity_dict.pkl'\r\n\tactivity_dict = get_activity_dict(activity_dict_path)\r\n\ttrain_dict_path = '../data/train_dict.pkl'\r\n\ttest_dict_path = '../data/test_dict.pkl'\r\n\tuser_id_index_path = '../data/user_id_index.pkl'\r\n\tdata_segement(activity_dict, train_dict_path, test_dict_path, user_id_index_path)\r\n\t'''\r\n\r\n\ttrain_dict_path = '../data/train_dict.pkl'\r\n\ttest_dict_path = '../data/test_dict.pkl'\r\n\tuser_id_index_path = '../data/user_id_index.pkl'\r\n\ttrain_dict, test_dict, user_id_index = get_data(train_dict_path, test_dict_path, user_id_index_path)\r\n\tprofile_path = '../data/profile.pkl'\r\n\tprofile = get_profile(profile_path)\r\n\r\n\tfeature_train, feature_test, category_train, category_test = feature_build(train_dict, test_dict, profile, user_id_index)\r\n\tprint 'feature_train sample: ' + str(feature_train[1000])\r\n\tprint 'feature_test sample: ' + str(feature_test[1000])\r\n\r\n\t# decision tree\r\n\tcategory_Dtree = decision_tree(feature_train, feature_test, category_train)\r\n\r\n\t# random_forests\r\n\t#category_RF = random_forests(feature_train, feature_test, 
category_train)\r\n\r\n\t# conventional method: most-used service\r\n\t#category_Mused = conventional_method_Mused(feature_train, feature_test, category_train)\r\n\r\n\t# conventional method: last-week service\r\n\t#category_Lweek = conventional_method_Lweek(feature_train, feature_test, category_train)\r\n\r\n\r\n\t#cal_hit_rate(category_Dtree, category_test)\r\n\t#calculating_F_value(category_Dtree, category_test)\r\n\t\r\n\t#counting_accuate_rate(category_Dtree, category_test)\r\n\r\n\t#save_file_for_sleep(category_Dtree, category_test)\r\n\r\n\t# this part is for migration.py\r\n\t'''\r\n\t# origin data, user_activity_origin is users' real behavior\r\n\tuser_activity_origin_path = '../data/user_activity_test/user_activity_origin.pkl'\r\n\tuser_activity_origin = activity_restore(feature_test, category_test)\r\n\tuser_activity_save(user_activity_origin, user_activity_origin_path)\r\n\t'''\r\n\t'''\r\n\t# predition data using decision_tree\r\n\tuser_activity_Dtree_path = '../data/user_activity_test/user_activity_Dtree.pkl'\r\n\tuser_activity_Dtree = activity_restore(feature_test, category_Dtree)\r\n\tuser_activity_save(user_activity_Dtree, user_activity_Dtree_path)\r\n\t'''\r\n\t'''\r\n\t# predition data according to users' most-used service\r\n\tuser_activity_Mused_path = '../data/user_activity_test/user_activity_Mused.pkl'\r\n\tuser_activity_Mused = activity_restore(feature_test, category_Mused)\r\n\tuser_activity_save(user_activity_Mused, user_activity_Mused_path)\r\n\t'''\r\n\t'''\r\n\t# predition data according to users' last-week service\r\n\tuser_activity_Lweek_path = '../data/user_activity_test/user_activity_Lweek.pkl'\r\n\tuser_activity_Lweek = activity_restore(feature_test, category_Lweek)\r\n\tuser_activity_save(user_activity_Lweek, user_activity_Lweek_path)\r\n\t'''",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def face_detector(img, face_cascade, eye_cascade, face_f):
xf = face_f[0]
yf = face_f[1]
wf = face_f[2]
hf = face_f[3]
xi = 0
yi = 0
wi = img.shape[1]
hi = img.shape[0]
c = float(0.1)
print('face_f: ', xf, xf + wf, yf, yf + hf)
if xf != xi or yf != yi or wf != wi or hf != hi:
y1 = yf - round(c * hf)
y2 = yf + hf + round(c * hf)
x1 = xf - round(c * wf)
x2 = xf + wf + round(c * wf)
roi_f = img[y1:y2, x1:x2]
print('Face apertura: ', x1, x2, y1, y2)
cv2.imshow('Face apertura', roi_f)
else:
roi_f = img[face_f[1]:face_f[1] + face_f[3], face_f[0]:face_f[0] +
face_f[2]]
gray_img = cv2.cvtColor(roi_f, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray_img', gray_img)
faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04,
minNeighbors=5)
print('Faces: ', faces)
if type(faces) == np.ndarray:
flag = -1
for x, y, w, h in faces:
flag = flag + 1
if w >= 100 and w <= 125 and h >= 100 and h <= 125:
print('Entro en el if de tamaño')
print('Face: ', x, y, w, h)
roi_gray = gray_img[y:y + h, x:x + w]
cv2.imshow('roi_gray', roi_gray)
eyes = eye_cascade.detectMultiScale(roi_gray)
c_eyes = 0
for ex, ey, ew, eh in eyes:
c_eyes = c_eyes + 1
if c_eyes >= 2:
print('faces[flag]', faces[flag])
return faces[flag]
<|reserved_special_token_1|>
import cv2
import numpy as np
def face_detector(img, face_cascade, eye_cascade, face_f):
xf = face_f[0]
yf = face_f[1]
wf = face_f[2]
hf = face_f[3]
xi = 0
yi = 0
wi = img.shape[1]
hi = img.shape[0]
c = float(0.1)
print('face_f: ', xf, xf + wf, yf, yf + hf)
if xf != xi or yf != yi or wf != wi or hf != hi:
y1 = yf - round(c * hf)
y2 = yf + hf + round(c * hf)
x1 = xf - round(c * wf)
x2 = xf + wf + round(c * wf)
roi_f = img[y1:y2, x1:x2]
print('Face apertura: ', x1, x2, y1, y2)
cv2.imshow('Face apertura', roi_f)
else:
roi_f = img[face_f[1]:face_f[1] + face_f[3], face_f[0]:face_f[0] +
face_f[2]]
gray_img = cv2.cvtColor(roi_f, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray_img', gray_img)
faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04,
minNeighbors=5)
print('Faces: ', faces)
if type(faces) == np.ndarray:
flag = -1
for x, y, w, h in faces:
flag = flag + 1
if w >= 100 and w <= 125 and h >= 100 and h <= 125:
print('Entro en el if de tamaño')
print('Face: ', x, y, w, h)
roi_gray = gray_img[y:y + h, x:x + w]
cv2.imshow('roi_gray', roi_gray)
eyes = eye_cascade.detectMultiScale(roi_gray)
c_eyes = 0
for ex, ey, ew, eh in eyes:
c_eyes = c_eyes + 1
if c_eyes >= 2:
print('faces[flag]', faces[flag])
return faces[flag]
<|reserved_special_token_1|>
# LIBRARIES
import cv2
import numpy as np
# FUNCTION: receives an image and returns the coordinates of the detected faces
def face_detector(img, face_cascade, eye_cascade, face_f):
#variables face_f
xf = face_f[0]
yf = face_f[1]
wf = face_f[2]
hf = face_f[3]
#variables img
xi = 0
yi = 0
wi = img.shape[1]
hi = img.shape[0]
    # widen face_f a bit relative to the img
    c = float(0.1) # this is 10 %
print("face_f: ", xf, xf + wf, yf, yf + hf)
#roi_i = img[yf: yf + hf, xf: xf + wf]
#cv2.imshow("roi_i", roi_i)
    if xf != xi or yf != yi or wf != wi or hf != hi: # (need to check whether this should be AND or OR)
        # face_f is not equal to img, so the widened crop is needed
y1 = yf - round(c * hf)
y2 = yf + hf + round(c * hf)
x1 = xf - round(c * wf)
x2 = xf + wf + round(c * wf)
roi_f = img[y1: y2, x1: x2]
print("Face apertura: ", x1, x2, y1, y2)
cv2.imshow('Face apertura',roi_f)
else:
        # face_f equals img, no widening is needed
roi_f = img[face_f[1] : face_f[1] + face_f[3], face_f[0] : face_f[0] + face_f[2]]
#cv2.imshow('roi_f',roi_f)
    # convert roi_f to grayscale for better processing
gray_img = cv2.cvtColor(roi_f,cv2.COLOR_BGR2GRAY)
cv2.imshow("gray_img",gray_img)
    # apply the face classifier to the image and store the result in faces: these are the x, y, height and width
faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04, minNeighbors=5)
print("Faces: ", faces)
if type(faces) == np.ndarray:
flag = -1
for x,y,w,h in faces:
flag = flag + 1
#print("Face: ", x,y,w,h)
if w >= 100 and w <= 125 and h >= 100 and h <= 125:
print("Entro en el if de tamaño")
#Region Of Interest
print("Face: ", x,y,w,h)
roi_gray = gray_img[y:y+h, x:x+w]
cv2.imshow("roi_gray", roi_gray)
                # apply the eye classifier to the region of interest (assumed to be a face) and store the result in eyes
eyes = eye_cascade.detectMultiScale(roi_gray)
c_eyes = 0
for ex,ey,ew,eh in eyes:
c_eyes = c_eyes + 1
                    if c_eyes >= 2: # if there are at least two eyes (an open mouth is sometimes detected as a third eye), it is a face
print("faces[flag]", faces[flag])
return faces[flag]
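

# Hypothetical usage sketch (added for illustration; the cascade paths, the sample image
# name and starting from the full frame are assumptions, not part of the original code):
if __name__ == '__main__':
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
    frame = cv2.imread('sample.jpg')  # assumed input image
    face_f = [0, 0, frame.shape[1], frame.shape[0]]  # start by searching the whole frame
    print(face_detector(frame, face_cascade, eye_cascade, face_f))
    cv2.waitKey(0)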
|
flexible
|
{
"blob_id": "1df3a5dc8ed767e20d34c2836eed79872a21a016",
"index": 9948,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef face_detector(img, face_cascade, eye_cascade, face_f):\n xf = face_f[0]\n yf = face_f[1]\n wf = face_f[2]\n hf = face_f[3]\n xi = 0\n yi = 0\n wi = img.shape[1]\n hi = img.shape[0]\n c = float(0.1)\n print('face_f: ', xf, xf + wf, yf, yf + hf)\n if xf != xi or yf != yi or wf != wi or hf != hi:\n y1 = yf - round(c * hf)\n y2 = yf + hf + round(c * hf)\n x1 = xf - round(c * wf)\n x2 = xf + wf + round(c * wf)\n roi_f = img[y1:y2, x1:x2]\n print('Face apertura: ', x1, x2, y1, y2)\n cv2.imshow('Face apertura', roi_f)\n else:\n roi_f = img[face_f[1]:face_f[1] + face_f[3], face_f[0]:face_f[0] +\n face_f[2]]\n gray_img = cv2.cvtColor(roi_f, cv2.COLOR_BGR2GRAY)\n cv2.imshow('gray_img', gray_img)\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04,\n minNeighbors=5)\n print('Faces: ', faces)\n if type(faces) == np.ndarray:\n flag = -1\n for x, y, w, h in faces:\n flag = flag + 1\n if w >= 100 and w <= 125 and h >= 100 and h <= 125:\n print('Entro en el if de tamaño')\n print('Face: ', x, y, w, h)\n roi_gray = gray_img[y:y + h, x:x + w]\n cv2.imshow('roi_gray', roi_gray)\n eyes = eye_cascade.detectMultiScale(roi_gray)\n c_eyes = 0\n for ex, ey, ew, eh in eyes:\n c_eyes = c_eyes + 1\n if c_eyes >= 2:\n print('faces[flag]', faces[flag])\n return faces[flag]\n",
"step-3": "import cv2\nimport numpy as np\n\n\ndef face_detector(img, face_cascade, eye_cascade, face_f):\n xf = face_f[0]\n yf = face_f[1]\n wf = face_f[2]\n hf = face_f[3]\n xi = 0\n yi = 0\n wi = img.shape[1]\n hi = img.shape[0]\n c = float(0.1)\n print('face_f: ', xf, xf + wf, yf, yf + hf)\n if xf != xi or yf != yi or wf != wi or hf != hi:\n y1 = yf - round(c * hf)\n y2 = yf + hf + round(c * hf)\n x1 = xf - round(c * wf)\n x2 = xf + wf + round(c * wf)\n roi_f = img[y1:y2, x1:x2]\n print('Face apertura: ', x1, x2, y1, y2)\n cv2.imshow('Face apertura', roi_f)\n else:\n roi_f = img[face_f[1]:face_f[1] + face_f[3], face_f[0]:face_f[0] +\n face_f[2]]\n gray_img = cv2.cvtColor(roi_f, cv2.COLOR_BGR2GRAY)\n cv2.imshow('gray_img', gray_img)\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04,\n minNeighbors=5)\n print('Faces: ', faces)\n if type(faces) == np.ndarray:\n flag = -1\n for x, y, w, h in faces:\n flag = flag + 1\n if w >= 100 and w <= 125 and h >= 100 and h <= 125:\n print('Entro en el if de tamaño')\n print('Face: ', x, y, w, h)\n roi_gray = gray_img[y:y + h, x:x + w]\n cv2.imshow('roi_gray', roi_gray)\n eyes = eye_cascade.detectMultiScale(roi_gray)\n c_eyes = 0\n for ex, ey, ew, eh in eyes:\n c_eyes = c_eyes + 1\n if c_eyes >= 2:\n print('faces[flag]', faces[flag])\n return faces[flag]\n",
"step-4": "#LIBRERIAS\nimport cv2\nimport numpy as np\n\n#FUNCION: recibe una imagen y te devuelve las coordenadas de las caras\ndef face_detector(img, face_cascade, eye_cascade, face_f): \n\n #variables face_f\n xf = face_f[0]\n yf = face_f[1]\n wf = face_f[2]\n hf = face_f[3]\n \n #variables img\n xi = 0\n yi = 0\n wi = img.shape[1]\n hi = img.shape[0]\n\n #apertura de face_f con relacion a la img\n c = float(0.1) #esto es un 10 %\n \n print(\"face_f: \", xf, xf + wf, yf, yf + hf)\n #roi_i = img[yf: yf + hf, xf: xf + wf]\n #cv2.imshow(\"roi_i\", roi_i)\n\n if xf != xi or yf != yi or wf != wi or hf != hi: #(tendre que ver si AND o OR)\n #face_f no es igual a img, hace falta la apertura\n \n y1 = yf - round(c * hf)\n y2 = yf + hf + round(c * hf)\n x1 = xf - round(c * wf)\n x2 = xf + wf + round(c * wf)\n\n roi_f = img[y1: y2, x1: x2]\n \n print(\"Face apertura: \", x1, x2, y1, y2)\n cv2.imshow('Face apertura',roi_f)\n\n else:\n\n #face_f es igual a img, no hace falta la apertura\n \n roi_f = img[face_f[1] : face_f[1] + face_f[3], face_f[0] : face_f[0] + face_f[2]]\n\n #cv2.imshow('roi_f',roi_f)\n\n\n\n #paso el roi_f a gris para un mejor tratamiento\n gray_img = cv2.cvtColor(roi_f,cv2.COLOR_BGR2GRAY)\n cv2.imshow(\"gray_img\",gray_img)\n \n #aplicar el clasificador de caras sobre la imagen y guardo el resultado en faces: seran la x, y, height y width\n faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.04, minNeighbors=5)\n print(\"Faces: \", faces)\n\n if type(faces) == np.ndarray:\n\n flag = -1\n\n for x,y,w,h in faces:\n\n flag = flag + 1\n\n #print(\"Face: \", x,y,w,h)\n \n if w >= 100 and w <= 125 and h >= 100 and h <= 125:\n print(\"Entro en el if de tamaño\")\n #Region Of Interest\n print(\"Face: \", x,y,w,h)\n roi_gray = gray_img[y:y+h, x:x+w]\n \n cv2.imshow(\"roi_gray\", roi_gray)\n\n #aplico el clasificador de ojos sobre la imagen de interes que se supone que es una cara y guardo el resultado en eyes\n eyes = eye_cascade.detectMultiScale(roi_gray)\n \n c_eyes = 0\n\n for ex,ey,ew,eh in eyes:\n \n c_eyes = c_eyes + 1\n\n if c_eyes >= 2: #si hay mínimo dos ojos (a veces la boca abierta la detecta como un tercer ojo), es una cara\n print(\"faces[flag]\", faces[flag])\n return faces[flag]\n \n \n \n \n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_Kmeans(*data):
x, labels_true = data
clst = cluster.KMeans()
clst.fit(x)
predicted_labels = clst.predict(x)
print('ARI: %s' % adjusted_rand_score(labels_true, predicted_labels))
print('Sum center distance %s' % (clst.inertia_,))
def test_Kmeans_nclusters(*data):
"""
    Test how the KMeans clustering result varies with the n_clusters parameter.
    Here we mainly study how the ARI and the total distance of all samples to
    their cluster centers change with the number of clusters.
"""
x, labels_true = data
nums = range(1, 50)
ARIs = []
Distances = []
for num in nums:
clst = cluster.KMeans(n_clusters=num)
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs.append(adjusted_rand_score(labels_true, predicted_labels))
Distances.append(clst.inertia_)
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.plot(nums, ARIs, marker='+')
ax.set_xlabel('n_clusters')
ax.set_ylabel('ARI')
ax = fig.add_subplot(1, 2, 2)
ax.plot(nums, Distances, marker='o')
ax.set_xlabel('n_cluster')
ax.set_ylabel('intertia_')
fig.suptitle('KMeans')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_Kmeans(*data):
x, labels_true = data
clst = cluster.KMeans()
clst.fit(x)
predicted_labels = clst.predict(x)
print('ARI: %s' % adjusted_rand_score(labels_true, predicted_labels))
print('Sum center distance %s' % (clst.inertia_,))
def test_Kmeans_nclusters(*data):
"""
    Test how the KMeans clustering result varies with the n_clusters parameter.
    Here we mainly study how the ARI and the total distance of all samples to
    their cluster centers change with the number of clusters.
"""
x, labels_true = data
nums = range(1, 50)
ARIs = []
Distances = []
for num in nums:
clst = cluster.KMeans(n_clusters=num)
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs.append(adjusted_rand_score(labels_true, predicted_labels))
Distances.append(clst.inertia_)
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.plot(nums, ARIs, marker='+')
ax.set_xlabel('n_clusters')
ax.set_ylabel('ARI')
ax = fig.add_subplot(1, 2, 2)
ax.plot(nums, Distances, marker='o')
ax.set_xlabel('n_cluster')
ax.set_ylabel('intertia_')
fig.suptitle('KMeans')
plt.show()
def test_KMeans_n_init(*data):
"""
    This function examines the influence of the number of KMeans runs (n_init) and of the strategy used to choose the initial center vectors.
"""
x, labels_true = data
nums = range(1, 50)
fig = plt.figure()
ARIs_k = []
Distances_k = []
ARIs_r = []
Distances_r = []
for num in nums:
clst = cluster.KMeans(n_init=num, init='k-means++')
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))
Distances_k.append(clst.inertia_)
clst = cluster.KMeans(n_init=num, init='random')
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))
Distances_r.append(clst.inertia_)
ax = fig.add_subplot(1, 2, 1)
ax.plot(nums, ARIs_k, marker='+', label='k-means++')
ax.plot(nums, ARIs_r, marker='+', label='random')
ax.set_xlabel('n_init')
ax.set_ylabel('ARI')
ax.set_ylim(0, 1)
ax.legend(loc='best')
ax = fig.add_subplot(1, 2, 2)
ax.plot(nums, Distances_k, marker='o', label='k-means++')
ax.plot(nums, Distances_r, marker='o', label='random')
ax.set_xlabel('n_init')
ax.set_ylabel('inertia_')
ax.legend(loc='best')
fig.suptitle('KMeans')
plt.show()
<|reserved_special_token_1|>
from sklearn import cluster
from sklearn.metrics import adjusted_rand_score
import matplotlib.pyplot as plt
def test_Kmeans(*data):
x, labels_true = data
clst = cluster.KMeans()
clst.fit(x)
predicted_labels = clst.predict(x)
print('ARI: %s' % adjusted_rand_score(labels_true, predicted_labels))
print('Sum center distance %s' % (clst.inertia_,))
def test_Kmeans_nclusters(*data):
"""
    Test how the KMeans clustering result varies with the n_clusters parameter.
    Here we mainly study how the ARI and the total distance of all samples to
    their cluster centers change with the number of clusters.
"""
x, labels_true = data
nums = range(1, 50)
ARIs = []
Distances = []
for num in nums:
clst = cluster.KMeans(n_clusters=num)
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs.append(adjusted_rand_score(labels_true, predicted_labels))
Distances.append(clst.inertia_)
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.plot(nums, ARIs, marker='+')
ax.set_xlabel('n_clusters')
ax.set_ylabel('ARI')
ax = fig.add_subplot(1, 2, 2)
ax.plot(nums, Distances, marker='o')
ax.set_xlabel('n_cluster')
ax.set_ylabel('intertia_')
fig.suptitle('KMeans')
plt.show()
def test_KMeans_n_init(*data):
"""
    This function examines the influence of the number of KMeans runs (n_init) and of the strategy used to choose the initial center vectors.
"""
x, labels_true = data
nums = range(1, 50)
fig = plt.figure()
ARIs_k = []
Distances_k = []
ARIs_r = []
Distances_r = []
for num in nums:
clst = cluster.KMeans(n_init=num, init='k-means++')
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))
Distances_k.append(clst.inertia_)
clst = cluster.KMeans(n_init=num, init='random')
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))
Distances_r.append(clst.inertia_)
ax = fig.add_subplot(1, 2, 1)
ax.plot(nums, ARIs_k, marker='+', label='k-means++')
ax.plot(nums, ARIs_r, marker='+', label='random')
ax.set_xlabel('n_init')
ax.set_ylabel('ARI')
ax.set_ylim(0, 1)
ax.legend(loc='best')
ax = fig.add_subplot(1, 2, 2)
ax.plot(nums, Distances_k, marker='o', label='k-means++')
ax.plot(nums, Distances_r, marker='o', label='random')
ax.set_xlabel('n_init')
ax.set_ylabel('inertia_')
ax.legend(loc='best')
fig.suptitle('KMeans')
plt.show()
<|reserved_special_token_1|>
from sklearn import cluster
from sklearn.metrics import adjusted_rand_score
import matplotlib.pyplot as plt
def test_Kmeans(*data):
x,labels_true = data
clst = cluster.KMeans()
clst.fit(x)
predicted_labels = clst.predict(x)
print("ARI: %s" % adjusted_rand_score(labels_true, predicted_labels))
print("Sum center distance %s" % (clst.inertia_,))
def test_Kmeans_nclusters(*data):
"""
    Test how the KMeans clustering result varies with the n_clusters parameter.
    Here we mainly study how the ARI and the total distance of all samples to
    their cluster centers change with the number of clusters.
"""
x, labels_true = data
nums = range(1, 50)
ARIs = []
Distances = []
for num in nums:
clst = cluster.KMeans(n_clusters = num)
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs.append(adjusted_rand_score(labels_true, predicted_labels))
Distances.append(clst.inertia_)
    # plotting
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
ax.plot(nums, ARIs, marker = "+")
ax.set_xlabel("n_clusters")
ax.set_ylabel("ARI")
ax = fig.add_subplot(1, 2, 2)
ax.plot(nums, Distances, marker = "o")
ax.set_xlabel("n_cluster")
ax.set_ylabel("intertia_")
fig.suptitle("KMeans")
plt.show()
def test_KMeans_n_init(*data):
"""
    This function examines the influence of the number of KMeans runs (n_init) and of the strategy used to choose the initial center vectors.
"""
x, labels_true = data
nums = range(1, 50)
    # plotting
fig = plt.figure()
ARIs_k = []
Distances_k = []
ARIs_r = []
Distances_r = []
for num in nums:
clst = cluster.KMeans(n_init = num, init = "k-means++")
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))
Distances_k.append(clst.inertia_)
clst = cluster.KMeans(n_init = num, init = "random")
clst.fit(x)
predicted_labels = clst.predict(x)
ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))
Distances_r.append(clst.inertia_)
ax = fig.add_subplot(1, 2, 1)
ax.plot(nums, ARIs_k, marker = "+", label = "k-means++")
ax.plot(nums, ARIs_r, marker = "+", label = "random")
ax.set_xlabel("n_init")
ax.set_ylabel("ARI")
ax.set_ylim(0, 1)
ax.legend(loc = "best")
ax = fig.add_subplot(1, 2, 2)
ax.plot(nums, Distances_k, marker = "o", label = "k-means++")
ax.plot(nums, Distances_r, marker = "o", label = "random")
ax.set_xlabel("n_init")
ax.set_ylabel("inertia_")
ax.legend(loc = "best")
fig.suptitle("KMeans")
plt.show()
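

# Illustrative usage sketch (added; make_blobs, the parameter values and calling only
# test_Kmeans here are assumptions for demonstration, not part of the original script):
if __name__ == '__main__':
    from sklearn.datasets import make_blobs
    x, labels_true = make_blobs(n_samples=500, centers=4, random_state=0)
    test_Kmeans(x, labels_true)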
|
flexible
|
{
"blob_id": "bd419d0a197a5e5a99a370e45cdb53a276ac5507",
"index": 5633,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_Kmeans(*data):\n x, labels_true = data\n clst = cluster.KMeans()\n clst.fit(x)\n predicted_labels = clst.predict(x)\n print('ARI: %s' % adjusted_rand_score(labels_true, predicted_labels))\n print('Sum center distance %s' % (clst.inertia_,))\n\n\ndef test_Kmeans_nclusters(*data):\n \"\"\"\n 测试KMeans的聚类结果随参数n_clusters的参数的影响\n 在这里,主要分别研究ARI和所有样本距离各簇中心的距离值和随簇的个数\n 的变化情况\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n ARIs = []\n Distances = []\n for num in nums:\n clst = cluster.KMeans(n_clusters=num)\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances.append(clst.inertia_)\n fig = plt.figure()\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs, marker='+')\n ax.set_xlabel('n_clusters')\n ax.set_ylabel('ARI')\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances, marker='o')\n ax.set_xlabel('n_cluster')\n ax.set_ylabel('intertia_')\n fig.suptitle('KMeans')\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_Kmeans(*data):\n x, labels_true = data\n clst = cluster.KMeans()\n clst.fit(x)\n predicted_labels = clst.predict(x)\n print('ARI: %s' % adjusted_rand_score(labels_true, predicted_labels))\n print('Sum center distance %s' % (clst.inertia_,))\n\n\ndef test_Kmeans_nclusters(*data):\n \"\"\"\n 测试KMeans的聚类结果随参数n_clusters的参数的影响\n 在这里,主要分别研究ARI和所有样本距离各簇中心的距离值和随簇的个数\n 的变化情况\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n ARIs = []\n Distances = []\n for num in nums:\n clst = cluster.KMeans(n_clusters=num)\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances.append(clst.inertia_)\n fig = plt.figure()\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs, marker='+')\n ax.set_xlabel('n_clusters')\n ax.set_ylabel('ARI')\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances, marker='o')\n ax.set_xlabel('n_cluster')\n ax.set_ylabel('intertia_')\n fig.suptitle('KMeans')\n plt.show()\n\n\ndef test_KMeans_n_init(*data):\n \"\"\"\n 该函数考察KMeans算法运行的次数和选择的初始中心向量策略的影响\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n fig = plt.figure()\n ARIs_k = []\n Distances_k = []\n ARIs_r = []\n Distances_r = []\n for num in nums:\n clst = cluster.KMeans(n_init=num, init='k-means++')\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_k.append(clst.inertia_)\n clst = cluster.KMeans(n_init=num, init='random')\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_r.append(clst.inertia_)\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs_k, marker='+', label='k-means++')\n ax.plot(nums, ARIs_r, marker='+', label='random')\n ax.set_xlabel('n_init')\n ax.set_ylabel('ARI')\n ax.set_ylim(0, 1)\n ax.legend(loc='best')\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances_k, marker='o', label='k-means++')\n ax.plot(nums, Distances_r, marker='o', label='random')\n ax.set_xlabel('n_init')\n ax.set_ylabel('inertia_')\n ax.legend(loc='best')\n fig.suptitle('KMeans')\n plt.show()\n",
"step-4": "from sklearn import cluster\nfrom sklearn.metrics import adjusted_rand_score\nimport matplotlib.pyplot as plt\n\n\ndef test_Kmeans(*data):\n x, labels_true = data\n clst = cluster.KMeans()\n clst.fit(x)\n predicted_labels = clst.predict(x)\n print('ARI: %s' % adjusted_rand_score(labels_true, predicted_labels))\n print('Sum center distance %s' % (clst.inertia_,))\n\n\ndef test_Kmeans_nclusters(*data):\n \"\"\"\n 测试KMeans的聚类结果随参数n_clusters的参数的影响\n 在这里,主要分别研究ARI和所有样本距离各簇中心的距离值和随簇的个数\n 的变化情况\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n ARIs = []\n Distances = []\n for num in nums:\n clst = cluster.KMeans(n_clusters=num)\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances.append(clst.inertia_)\n fig = plt.figure()\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs, marker='+')\n ax.set_xlabel('n_clusters')\n ax.set_ylabel('ARI')\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances, marker='o')\n ax.set_xlabel('n_cluster')\n ax.set_ylabel('intertia_')\n fig.suptitle('KMeans')\n plt.show()\n\n\ndef test_KMeans_n_init(*data):\n \"\"\"\n 该函数考察KMeans算法运行的次数和选择的初始中心向量策略的影响\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n fig = plt.figure()\n ARIs_k = []\n Distances_k = []\n ARIs_r = []\n Distances_r = []\n for num in nums:\n clst = cluster.KMeans(n_init=num, init='k-means++')\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_k.append(clst.inertia_)\n clst = cluster.KMeans(n_init=num, init='random')\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_r.append(clst.inertia_)\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs_k, marker='+', label='k-means++')\n ax.plot(nums, ARIs_r, marker='+', label='random')\n ax.set_xlabel('n_init')\n ax.set_ylabel('ARI')\n ax.set_ylim(0, 1)\n ax.legend(loc='best')\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances_k, marker='o', label='k-means++')\n ax.plot(nums, Distances_r, marker='o', label='random')\n ax.set_xlabel('n_init')\n ax.set_ylabel('inertia_')\n ax.legend(loc='best')\n fig.suptitle('KMeans')\n plt.show()\n",
"step-5": "from sklearn import cluster\nfrom sklearn.metrics import adjusted_rand_score\nimport matplotlib.pyplot as plt\n\ndef test_Kmeans(*data):\n x,labels_true = data\n clst = cluster.KMeans()\n clst.fit(x)\n predicted_labels = clst.predict(x)\n print(\"ARI: %s\" % adjusted_rand_score(labels_true, predicted_labels))\n print(\"Sum center distance %s\" % (clst.inertia_,))\n\n\ndef test_Kmeans_nclusters(*data):\n \"\"\"\n 测试KMeans的聚类结果随参数n_clusters的参数的影响\n 在这里,主要分别研究ARI和所有样本距离各簇中心的距离值和随簇的个数\n 的变化情况\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n ARIs = []\n Distances = []\n for num in nums:\n clst = cluster.KMeans(n_clusters = num)\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances.append(clst.inertia_)\n # 绘图\n fig = plt.figure()\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs, marker = \"+\")\n ax.set_xlabel(\"n_clusters\")\n ax.set_ylabel(\"ARI\")\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances, marker = \"o\")\n ax.set_xlabel(\"n_cluster\")\n ax.set_ylabel(\"intertia_\")\n fig.suptitle(\"KMeans\")\n plt.show()\n\n\ndef test_KMeans_n_init(*data):\n \"\"\"\n 该函数考察KMeans算法运行的次数和选择的初始中心向量策略的影响\n \"\"\"\n x, labels_true = data\n nums = range(1, 50)\n # 绘图\n fig = plt.figure()\n\n ARIs_k = []\n Distances_k = []\n ARIs_r = []\n Distances_r = []\n for num in nums:\n clst = cluster.KMeans(n_init = num, init = \"k-means++\")\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_k.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_k.append(clst.inertia_)\n \n clst = cluster.KMeans(n_init = num, init = \"random\")\n clst.fit(x)\n predicted_labels = clst.predict(x)\n ARIs_r.append(adjusted_rand_score(labels_true, predicted_labels))\n Distances_r.append(clst.inertia_)\n ax = fig.add_subplot(1, 2, 1)\n ax.plot(nums, ARIs_k, marker = \"+\", label = \"k-means++\")\n ax.plot(nums, ARIs_r, marker = \"+\", label = \"random\")\n ax.set_xlabel(\"n_init\")\n ax.set_ylabel(\"ARI\")\n ax.set_ylim(0, 1)\n ax.legend(loc = \"best\")\n ax = fig.add_subplot(1, 2, 2)\n ax.plot(nums, Distances_k, marker = \"o\", label = \"k-means++\")\n ax.plot(nums, Distances_r, marker = \"o\", label = \"random\")\n ax.set_xlabel(\"n_init\")\n ax.set_ylabel(\"inertia_\")\n ax.legend(loc = \"best\")\n fig.suptitle(\"KMeans\")\n plt.show()\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import matplotlib.pyplot as plotOp
import numpy as np
from random import randint
import re as regexOp
|
normal
|
{
"blob_id": "6c0a1d4ffd64e0566be53937d9b48975f2530852",
"index": 7767,
"step-1": "<mask token>\n",
"step-2": "import matplotlib.pyplot as plotOp\nimport numpy as np\nfrom random import randint\nimport re as regexOp\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def maxSideLength(self, mat: List[List[int]], threshold: int) ->int:
def squareSum(r1: int, c1: int, r2: int, c2: int) ->int:
return prefixSum[r2 + 1][c2 + 1] - prefixSum[r1][c2 + 1
] - prefixSum[r2 + 1][c1] + prefixSum[r1][c1]
m = len(mat)
n = len(mat[0])
ans = 0
prefixSum = [([0] * (n + 1)) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
prefixSum[i][j] = mat[i - 1][j - 1] + prefixSum[i][j - 1
] + prefixSum[i - 1][j] - prefixSum[i - 1][j - 1]
for i in range(m):
for j in range(n):
for length in range(ans, min(m - i, n - j)):
if squareSum(i, j, i + length, j + length) > threshold:
break
ans = max(ans, length + 1)
return ans
<|reserved_special_token_1|>
class Solution:
def maxSideLength(self, mat: List[List[int]], threshold: int) -> int:
def squareSum(r1: int, c1: int, r2: int, c2: int) -> int:
return prefixSum[r2 + 1][c2 + 1] - prefixSum[r1][c2 + 1] - prefixSum[r2 + 1][c1] + prefixSum[r1][c1]
m = len(mat)
n = len(mat[0])
ans = 0
prefixSum = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
prefixSum[i][j] = mat[i - 1][j - 1] + prefixSum[i][j - 1] + \
prefixSum[i - 1][j] - prefixSum[i - 1][j - 1]
for i in range(m):
for j in range(n):
for length in range(ans, min(m - i, n - j)):
if squareSum(i, j, i + length, j + length) > threshold:
break
ans = max(ans, length + 1)
return ans
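
# --- Illustrative usage (assumption: not part of the original solution) ---
# This LeetCode-style snippet relies on `from typing import List` being available
# for the type hints; that import is assumed here. Worked example: in a 3x3
# all-ones matrix with threshold 4, the 2x2 square sums to 4 <= 4 while the 3x3
# square sums to 9 > 4, so the expected answer is 2.
if __name__ == "__main__":
    mat = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
    print(Solution().maxSideLength(mat, 4))  # expected output: 2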
|
flexible
|
{
"blob_id": "c8f2df1471a9581d245d52437470b6c67b341ece",
"index": 7297,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def maxSideLength(self, mat: List[List[int]], threshold: int) ->int:\n\n def squareSum(r1: int, c1: int, r2: int, c2: int) ->int:\n return prefixSum[r2 + 1][c2 + 1] - prefixSum[r1][c2 + 1\n ] - prefixSum[r2 + 1][c1] + prefixSum[r1][c1]\n m = len(mat)\n n = len(mat[0])\n ans = 0\n prefixSum = [([0] * (n + 1)) for _ in range(m + 1)]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n prefixSum[i][j] = mat[i - 1][j - 1] + prefixSum[i][j - 1\n ] + prefixSum[i - 1][j] - prefixSum[i - 1][j - 1]\n for i in range(m):\n for j in range(n):\n for length in range(ans, min(m - i, n - j)):\n if squareSum(i, j, i + length, j + length) > threshold:\n break\n ans = max(ans, length + 1)\n return ans\n",
"step-4": "class Solution:\n def maxSideLength(self, mat: List[List[int]], threshold: int) -> int:\n def squareSum(r1: int, c1: int, r2: int, c2: int) -> int:\n return prefixSum[r2 + 1][c2 + 1] - prefixSum[r1][c2 + 1] - prefixSum[r2 + 1][c1] + prefixSum[r1][c1]\n\n m = len(mat)\n n = len(mat[0])\n\n ans = 0\n prefixSum = [[0] * (n + 1) for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n prefixSum[i][j] = mat[i - 1][j - 1] + prefixSum[i][j - 1] + \\\n prefixSum[i - 1][j] - prefixSum[i - 1][j - 1]\n\n for i in range(m):\n for j in range(n):\n for length in range(ans, min(m - i, n - j)):\n if squareSum(i, j, i + length, j + length) > threshold:\n break\n ans = max(ans, length + 1)\n\n return ans\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plt.plot(dev_x, dev_y, label='All Devs')
<|reserved_special_token_0|>
plt.plot(dev_x, py_dev_y, label='Python')
plt.xlabel('Ages')
plt.ylabel('Median Salary')
plt.title('Median Salary (USD) by Age')
plt.legend()
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
dev_y = [4000, 45000, 50000, 55000, 60000, 56000, 62316, 64928, 67317,
68748, 73752]
plt.plot(dev_x, dev_y, label='All Devs')
py_dev_y = [45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496,
75370, 83640]
plt.plot(dev_x, py_dev_y, label='Python')
plt.xlabel('Ages')
plt.ylabel('Median Salary')
plt.title('Median Salary (USD) by Age')
plt.legend()
plt.show()
<|reserved_special_token_1|>
from matplotlib import pyplot as plt
dev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
dev_y = [4000, 45000, 50000, 55000, 60000, 56000, 62316, 64928, 67317,
68748, 73752]
plt.plot(dev_x, dev_y, label='All Devs')
py_dev_y = [45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496,
75370, 83640]
plt.plot(dev_x, py_dev_y, label='Python')
plt.xlabel('Ages')
plt.ylabel('Median Salary')
plt.title('Median Salary (USD) by Age')
plt.legend()
plt.show()
<|reserved_special_token_1|>
from matplotlib import pyplot as plt
dev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]
dev_y = [4000, 45000, 50000, 55000, 60000,
56000, 62316, 64928, 67317, 68748, 73752]
plt.plot(dev_x, dev_y, label='All Devs')
#dev_x and dev_y are respectively x-axis and y-axis
# Median Python Developer Salaries by Age
py_dev_y = [45372, 48876, 53850, 57287, 63016,
65998, 70003, 70000, 71496, 75370, 83640]
plt.plot(dev_x, py_dev_y, label='Python')
plt.xlabel('Ages')
plt.ylabel('Median Salary')
plt.title('Median Salary (USD) by Age')
#Shows the title above the figure
plt.legend()
#This shows the legend (series labels) of the chart or figure
plt.show()
|
flexible
|
{
"blob_id": "796a13de72c2879956c5f9c9c9bdef7253760c9d",
"index": 9895,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.plot(dev_x, dev_y, label='All Devs')\n<mask token>\nplt.plot(dev_x, py_dev_y, label='Python')\nplt.xlabel('Ages')\nplt.ylabel('Median Salary')\nplt.title('Median Salary (USD) by Age')\nplt.legend()\nplt.show()\n",
"step-3": "<mask token>\ndev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\ndev_y = [4000, 45000, 50000, 55000, 60000, 56000, 62316, 64928, 67317, \n 68748, 73752]\nplt.plot(dev_x, dev_y, label='All Devs')\npy_dev_y = [45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496, \n 75370, 83640]\nplt.plot(dev_x, py_dev_y, label='Python')\nplt.xlabel('Ages')\nplt.ylabel('Median Salary')\nplt.title('Median Salary (USD) by Age')\nplt.legend()\nplt.show()\n",
"step-4": "from matplotlib import pyplot as plt\ndev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\ndev_y = [4000, 45000, 50000, 55000, 60000, 56000, 62316, 64928, 67317, \n 68748, 73752]\nplt.plot(dev_x, dev_y, label='All Devs')\npy_dev_y = [45372, 48876, 53850, 57287, 63016, 65998, 70003, 70000, 71496, \n 75370, 83640]\nplt.plot(dev_x, py_dev_y, label='Python')\nplt.xlabel('Ages')\nplt.ylabel('Median Salary')\nplt.title('Median Salary (USD) by Age')\nplt.legend()\nplt.show()\n",
"step-5": "from matplotlib import pyplot as plt\n\n\n\n\ndev_x = [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35]\n\ndev_y = [4000, 45000, 50000, 55000, 60000,\n 56000, 62316, 64928, 67317, 68748, 73752]\n\nplt.plot(dev_x, dev_y, label='All Devs')\n#dev_x and dev_y are respectively x-axis and y-axis\n\n\n\n\n\n# Median Python Developer Salaries by Age\n\npy_dev_y = [45372, 48876, 53850, 57287, 63016,\n 65998, 70003, 70000, 71496, 75370, 83640]\n\nplt.plot(dev_x, py_dev_y, label='Python')\n\n\n\n\n\nplt.xlabel('Ages')\n\nplt.ylabel('Median Salary')\n\nplt.title('Median Salary (USD) by Age')\n#Shows the title above the figure\n\nplt.legend()\n#This shows indexing of the chart or figure\n\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def two_teams(sailors):
result = []
temp = [[], []]
for i in sailors.items():
if i[1] > 40 or i[1] < 20:
temp[0].append(i[0])
else:
temp[1].append(i[0])
result.append(sorted(temp[0]))
result.append(sorted(temp[1]))
return result
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def two_teams(sailors):
result = []
temp = [[], []]
for i in sailors.items():
if i[1] > 40 or i[1] < 20:
temp[0].append(i[0])
else:
temp[1].append(i[0])
result.append(sorted(temp[0]))
result.append(sorted(temp[1]))
return result
if __name__ == '__main__':
print('Example:')
print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19})
)
print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,
'McCortney': 54}))
assert two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}
) == [['Abrahams', 'Coleman'], ['Smith', 'Wesson']]
assert two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,
'McCortney': 54}) == [['Fernandes', 'Kale', 'McCortney'], ['Johnson']]
print("Coding complete? Click 'Check' to earn cool rewards!")
<|reserved_special_token_1|>
#Answer to The Ship Teams - https://py.checkio.org/en/mission/the-ship-teams/
def two_teams(sailors):
result = [] #To store the result
temp = [[],[]] #To store the intermediatary values
for i in sailors.items(): #To get the values of dictionary as Tuple
if i[1] > 40 or i[1] < 20: #To get the people to be added to the First Ship
temp[0].append(i[0]) #Adding each person name to first Temp List
else: #To get the people to be added to the Second Ship
temp[1].append(i[0]) #Adding each person name to second Temp List
result.append(sorted(temp[0])) #Adding all the names of the Ship 1 to resultant
result.append(sorted(temp[1])) #Adding all the names of the Ship 2 to resultant
return result #Return the result
if __name__ == '__main__':
print("Example:")
print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}))
print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41, 'McCortney': 54}))
#These "asserts" using only for self-checking and not necessary for auto-testing
assert two_teams({
'Smith': 34,
'Wesson': 22,
'Coleman': 45,
'Abrahams': 19}) == [
['Abrahams', 'Coleman'],
['Smith', 'Wesson']
]
assert two_teams({
'Fernandes': 18,
'Johnson': 22,
'Kale': 41,
'McCortney': 54}) == [
['Fernandes', 'Kale', 'McCortney'],
['Johnson']
]
print("Coding complete? Click 'Check' to earn cool rewards!")
|
flexible
|
{
"blob_id": "de634c95fddf4591cb15cd0eb20e798043075798",
"index": 2464,
"step-1": "<mask token>\n",
"step-2": "def two_teams(sailors):\n result = []\n temp = [[], []]\n for i in sailors.items():\n if i[1] > 40 or i[1] < 20:\n temp[0].append(i[0])\n else:\n temp[1].append(i[0])\n result.append(sorted(temp[0]))\n result.append(sorted(temp[1]))\n return result\n\n\n<mask token>\n",
"step-3": "def two_teams(sailors):\n result = []\n temp = [[], []]\n for i in sailors.items():\n if i[1] > 40 or i[1] < 20:\n temp[0].append(i[0])\n else:\n temp[1].append(i[0])\n result.append(sorted(temp[0]))\n result.append(sorted(temp[1]))\n return result\n\n\nif __name__ == '__main__':\n print('Example:')\n print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19})\n )\n print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,\n 'McCortney': 54}))\n assert two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}\n ) == [['Abrahams', 'Coleman'], ['Smith', 'Wesson']]\n assert two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41,\n 'McCortney': 54}) == [['Fernandes', 'Kale', 'McCortney'], ['Johnson']]\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n",
"step-4": "#Answer to The Ship Teams - https://py.checkio.org/en/mission/the-ship-teams/\n\ndef two_teams(sailors):\n result = [] #To store the result\n temp = [[],[]] #To store the intermediatary values\n for i in sailors.items(): #To get the values of dictionary as Tuple\n if i[1] > 40 or i[1] < 20: #To get the people to be added to the First Ship\n temp[0].append(i[0]) #Adding each person name to first Temp List\n else: #To get the people to be added to the Second Ship\n temp[1].append(i[0]) #Adding each person name to second Temp List\n result.append(sorted(temp[0])) #Adding all the names of the Ship 1 to resultant\n result.append(sorted(temp[1])) #Adding all the names of the Ship 2 to resultant\n return result #Return the result\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(two_teams({'Smith': 34, 'Wesson': 22, 'Coleman': 45, 'Abrahams': 19}))\n print(two_teams({'Fernandes': 18, 'Johnson': 22, 'Kale': 41, 'McCortney': 54}))\n\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert two_teams({\n 'Smith': 34, \n 'Wesson': 22, \n 'Coleman': 45, \n 'Abrahams': 19}) == [\n ['Abrahams', 'Coleman'], \n ['Smith', 'Wesson']\n ]\n\n assert two_teams({\n 'Fernandes': 18,\n 'Johnson': 22,\n 'Kale': 41,\n 'McCortney': 54}) == [\n ['Fernandes', 'Kale', 'McCortney'], \n ['Johnson']\n ]\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class demo:
<|reserved_special_token_0|>
def __init__(self):
self._myKey = RPiKeyButtons()
def _getKeyButtonName(self, keyBtn):
if keyBtn == CONFIG_KEY.BUTTON_ACT_A:
return 'BUTTON_A'
if keyBtn == CONFIG_KEY.BUTTON_ACT_B:
return 'BUTTON_B'
if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:
return 'JOY_UP'
if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:
return 'JOY_DOWN'
if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:
return 'JOY_RIGHT'
if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:
return 'JOY_LEFT'
if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:
return 'JOY_CENTER'
return 'UNKNOW'
def onKeyButtonDown(self, channel):
print('DOWN:\t{}'.format(self._getKeyButtonName(channel)))
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def readExitButtonStatus(self):
"""!
Read Exit action ( button A and Joy UP press down same time )
"""
pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)
pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)
return pressA and pressUp
def run(self):
print(
'\nPress any key button to test ...\n < JOY UP + Button A to Exit >\n\n'
)
self.initKeyButtons('INT')
while True:
if self.readExitButtonStatus():
break
pass
self.releaseKeyButtons()
GPIO.cleanup()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class demo:
_myKey = None
def __init__(self):
self._myKey = RPiKeyButtons()
def _getKeyButtonName(self, keyBtn):
if keyBtn == CONFIG_KEY.BUTTON_ACT_A:
return 'BUTTON_A'
if keyBtn == CONFIG_KEY.BUTTON_ACT_B:
return 'BUTTON_B'
if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:
return 'JOY_UP'
if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:
return 'JOY_DOWN'
if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:
return 'JOY_RIGHT'
if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:
return 'JOY_LEFT'
if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:
return 'JOY_CENTER'
return 'UNKNOW'
def onKeyButtonDown(self, channel):
print('DOWN:\t{}'.format(self._getKeyButtonName(channel)))
pass
def onKeyButtonUp(self, channel):
print('UP:\t{}\n'.format(self._getKeyButtonName(channel)))
pass
def _callbackKeyButton(self, channel):
"""!
Key button interrupt event callback function
Inherit this method to implement your want
"""
if self._myKey.readKeyButton(channel) == 0:
self.onKeyButtonDown(channel)
return
if self._myKey.readKeyButton(channel) == 1:
self.onKeyButtonUp(channel)
return
def initKeyButtons(self, mode='INT'):
"""!
Init all key buttons interrupt events or query mode.
Inherit the onKeyButtonDown and onKeyButtonUp to implement your want
@param mode: Can be { "INT" | "QUERY" }, default is "INT"
"""
if mode.upper() == 'INT':
try:
self._myKey.configKeyButtons(enableButtons=[{'id':
CONFIG_KEY.BUTTON_ACT_A, 'callback': self.
_callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,
'callback': self._callbackKeyButton}, {'id': CONFIG_KEY
.BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {
'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.
_callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,
'callback': self._callbackKeyButton}, {'id': CONFIG_KEY
.BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},
{'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.
_callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)
except:
pass
if mode.upper() == 'QUERY':
self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,
'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,
'callback': None}])
def releaseKeyButtons(self):
"""!
Release all key button events
"""
self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,
CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.
BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.
BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])
def readKeyButton(self, keyBtn):
"""!
Read key button status, return 0 / 1
"""
if self._myKey.readKeyButton(keyBtn) == 0:
sleep(0.02)
return 0 if self._myKey.readKeyButton(keyBtn) else 1
return 0
def readExitButtonStatus(self):
"""!
Read Exit action ( button A and Joy UP press down same time )
"""
pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)
pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)
return pressA and pressUp
def run(self):
print(
'\nPress any key button to test ...\n < JOY UP + Button A to Exit >\n\n'
)
self.initKeyButtons('INT')
while True:
if self.readExitButtonStatus():
break
pass
self.releaseKeyButtons()
GPIO.cleanup()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CONFIG_KEY:
BUTTON_ACT_A = 22
BUTTON_ACT_B = 23
BUTTON_JOY_LEFT = 26
BUTTON_JOY_RIGHT = 27
BUTTON_JOY_UP = 5
BUTTON_JOY_DOWN = 6
BUTTON_JOY_OK = 24
class demo:
_myKey = None
def __init__(self):
self._myKey = RPiKeyButtons()
def _getKeyButtonName(self, keyBtn):
if keyBtn == CONFIG_KEY.BUTTON_ACT_A:
return 'BUTTON_A'
if keyBtn == CONFIG_KEY.BUTTON_ACT_B:
return 'BUTTON_B'
if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:
return 'JOY_UP'
if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:
return 'JOY_DOWN'
if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:
return 'JOY_RIGHT'
if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:
return 'JOY_LEFT'
if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:
return 'JOY_CENTER'
return 'UNKNOW'
def onKeyButtonDown(self, channel):
print('DOWN:\t{}'.format(self._getKeyButtonName(channel)))
pass
def onKeyButtonUp(self, channel):
print('UP:\t{}\n'.format(self._getKeyButtonName(channel)))
pass
def _callbackKeyButton(self, channel):
"""!
Key button interrupt event callback function
Inherit this method to implement your want
"""
if self._myKey.readKeyButton(channel) == 0:
self.onKeyButtonDown(channel)
return
if self._myKey.readKeyButton(channel) == 1:
self.onKeyButtonUp(channel)
return
def initKeyButtons(self, mode='INT'):
"""!
Init all key buttons interrupt events or query mode.
Inherit the onKeyButtonDown and onKeyButtonUp to implement your want
@param mode: Can be { "INT" | "QUERY" }, default is "INT"
"""
if mode.upper() == 'INT':
try:
self._myKey.configKeyButtons(enableButtons=[{'id':
CONFIG_KEY.BUTTON_ACT_A, 'callback': self.
_callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,
'callback': self._callbackKeyButton}, {'id': CONFIG_KEY
.BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {
'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.
_callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,
'callback': self._callbackKeyButton}, {'id': CONFIG_KEY
.BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},
{'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.
_callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)
except:
pass
if mode.upper() == 'QUERY':
self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,
'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,
'callback': None}])
def releaseKeyButtons(self):
"""!
Release all key button events
"""
self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,
CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.
BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.
BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])
def readKeyButton(self, keyBtn):
"""!
Read key button status, return 0 / 1
"""
if self._myKey.readKeyButton(keyBtn) == 0:
sleep(0.02)
return 0 if self._myKey.readKeyButton(keyBtn) else 1
return 0
def readExitButtonStatus(self):
"""!
Read Exit action ( button A and Joy UP press down same time )
"""
pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)
pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)
return pressA and pressUp
def run(self):
print(
'\nPress any key button to test ...\n < JOY UP + Button A to Exit >\n\n'
)
self.initKeyButtons('INT')
while True:
if self.readExitButtonStatus():
break
pass
self.releaseKeyButtons()
GPIO.cleanup()
if __name__ == '__main__':
demo().run()
print('Key buttons demo is end.')
<|reserved_special_token_1|>
from time import sleep
import RPi.GPIO as GPIO
from JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL
class CONFIG_KEY:
BUTTON_ACT_A = 22
BUTTON_ACT_B = 23
BUTTON_JOY_LEFT = 26
BUTTON_JOY_RIGHT = 27
BUTTON_JOY_UP = 5
BUTTON_JOY_DOWN = 6
BUTTON_JOY_OK = 24
class demo:
_myKey = None
def __init__(self):
self._myKey = RPiKeyButtons()
def _getKeyButtonName(self, keyBtn):
if keyBtn == CONFIG_KEY.BUTTON_ACT_A:
return 'BUTTON_A'
if keyBtn == CONFIG_KEY.BUTTON_ACT_B:
return 'BUTTON_B'
if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:
return 'JOY_UP'
if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:
return 'JOY_DOWN'
if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:
return 'JOY_RIGHT'
if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:
return 'JOY_LEFT'
if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:
return 'JOY_CENTER'
return 'UNKNOW'
def onKeyButtonDown(self, channel):
print('DOWN:\t{}'.format(self._getKeyButtonName(channel)))
pass
def onKeyButtonUp(self, channel):
print('UP:\t{}\n'.format(self._getKeyButtonName(channel)))
pass
def _callbackKeyButton(self, channel):
"""!
Key button interrupt event callback function
Inherit this method to implement your want
"""
if self._myKey.readKeyButton(channel) == 0:
self.onKeyButtonDown(channel)
return
if self._myKey.readKeyButton(channel) == 1:
self.onKeyButtonUp(channel)
return
def initKeyButtons(self, mode='INT'):
"""!
Init all key buttons interrupt events or query mode.
Inherit the onKeyButtonDown and onKeyButtonUp to implement your want
@param mode: Can be { "INT" | "QUERY" }, default is "INT"
"""
if mode.upper() == 'INT':
try:
self._myKey.configKeyButtons(enableButtons=[{'id':
CONFIG_KEY.BUTTON_ACT_A, 'callback': self.
_callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,
'callback': self._callbackKeyButton}, {'id': CONFIG_KEY
.BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {
'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.
_callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,
'callback': self._callbackKeyButton}, {'id': CONFIG_KEY
.BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},
{'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.
_callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)
except:
pass
if mode.upper() == 'QUERY':
self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,
'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,
'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,
'callback': None}])
def releaseKeyButtons(self):
"""!
Release all key button events
"""
self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,
CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.
BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.
BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])
def readKeyButton(self, keyBtn):
"""!
Read key button status, return 0 / 1
"""
if self._myKey.readKeyButton(keyBtn) == 0:
sleep(0.02)
return 0 if self._myKey.readKeyButton(keyBtn) else 1
return 0
def readExitButtonStatus(self):
"""!
Read Exit action ( button A and Joy UP press down same time )
"""
pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)
pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)
return pressA and pressUp
def run(self):
print(
'\nPress any key button to test ...\n < JOY UP + Button A to Exit >\n\n'
)
self.initKeyButtons('INT')
while True:
if self.readExitButtonStatus():
break
pass
self.releaseKeyButtons()
GPIO.cleanup()
if __name__ == '__main__':
demo().run()
print('Key buttons demo is end.')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
#
# RPi.Spark KeyButton Demo
#
# Author: Kunpeng Zhang
# 2018.6.6
#
# See LICENSE for details.
from time import sleep
import RPi.GPIO as GPIO
from JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL
########################################################################
# Key buttons include Joystick buttons and Action buttons,
# using BCM mode; the keyboard layout is:
#
# [JOY UP]
# [JOY LEFT] [JOY RIGHT] [ACT_A] [ACT_B]
# [JOY DOWN]
#
class CONFIG_KEY:
# Action Buttons BCM_IO_NUM
BUTTON_ACT_A = 22
BUTTON_ACT_B = 23
# Joy Buttons BCM_IO_NUM
BUTTON_JOY_LEFT = 26
BUTTON_JOY_RIGHT = 27
BUTTON_JOY_UP = 5
BUTTON_JOY_DOWN = 6
BUTTON_JOY_OK = 24
class demo:
_myKey = None
def __init__(self):
self._myKey = RPiKeyButtons()
def _getKeyButtonName(self, keyBtn):
if keyBtn == CONFIG_KEY.BUTTON_ACT_A: return "BUTTON_A"
if keyBtn == CONFIG_KEY.BUTTON_ACT_B: return "BUTTON_B"
if keyBtn == CONFIG_KEY.BUTTON_JOY_UP: return "JOY_UP"
if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN: return "JOY_DOWN"
if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT: return "JOY_RIGHT"
if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT: return "JOY_LEFT"
if keyBtn == CONFIG_KEY.BUTTON_JOY_OK: return "JOY_CENTER"
return "UNKNOW"
def onKeyButtonDown(self, channel):
print("DOWN:\t{}".format(self._getKeyButtonName(channel)))
pass
def onKeyButtonUp(self, channel):
print("UP:\t{}\n".format(self._getKeyButtonName(channel)))
pass
def _callbackKeyButton(self, channel):
"""!
Key button interrupt event callback function
Inherit this method to implement your want
"""
if self._myKey.readKeyButton(channel) == 0:
self.onKeyButtonDown(channel)
return
if self._myKey.readKeyButton(channel) == 1:
self.onKeyButtonUp(channel)
return
def initKeyButtons(self, mode = "INT"):
"""!
Init all key buttons interrupt events or query mode.
Inherit the onKeyButtonDown and onKeyButtonUp to implement your want
@param mode: Can be { "INT" | "QUERY" }, default is "INT"
"""
if mode.upper() == "INT":
try:
self._myKey.configKeyButtons(
enableButtons = [
{"id":CONFIG_KEY.BUTTON_ACT_A, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_ACT_B, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_UP, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_DOWN, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_LEFT, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_RIGHT, "callback":self._callbackKeyButton},
{"id":CONFIG_KEY.BUTTON_JOY_OK, "callback":self._callbackKeyButton}
],
bounceTime = DEF_BOUNCE_TIME_SHORT_MON )
except:
pass
if mode.upper() == "QUERY":
self._myKey.configKeyButtons([
{"id":CONFIG_KEY.BUTTON_ACT_A, "callback":None},
{"id":CONFIG_KEY.BUTTON_ACT_B, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_OK, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_UP, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_DOWN, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_LEFT, "callback":None},
{"id":CONFIG_KEY.BUTTON_JOY_RIGHT, "callback":None}
])
def releaseKeyButtons(self):
"""!
Release all key button events
"""
self._myKey.removeKeyButtonEvent([
CONFIG_KEY.BUTTON_ACT_A,
CONFIG_KEY.BUTTON_ACT_B,
CONFIG_KEY.BUTTON_JOY_UP,
CONFIG_KEY.BUTTON_JOY_DOWN,
CONFIG_KEY.BUTTON_JOY_LEFT,
CONFIG_KEY.BUTTON_JOY_RIGHT,
CONFIG_KEY.BUTTON_JOY_OK
])
def readKeyButton(self, keyBtn):
"""!
Read key button status, return 0 / 1
"""
if self._myKey.readKeyButton( keyBtn ) == 0:
sleep(0.02)
return 0 if self._myKey.readKeyButton( keyBtn ) else 1
return 0
def readExitButtonStatus(self):
"""!
Read Exit action ( button A and Joy UP press down same time )
"""
pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)
pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)
return pressA and pressUp
def run(self):
print("\nPress any key button to test ...\n < JOY UP + Button A to Exit >\n\n")
self.initKeyButtons("INT")
while True:
if self.readExitButtonStatus(): break
pass
self.releaseKeyButtons()
GPIO.cleanup()
if __name__ == "__main__":
demo().run()
print("Key buttons demo is end.")
|
flexible
|
{
"blob_id": "50c274e0365f2556a46eb58edcd1f0a7301e89db",
"index": 8716,
"step-1": "<mask token>\n\n\nclass demo:\n <mask token>\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n GPIO.cleanup()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print('UP:\\t{}\\n'.format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode='INT'):\n \"\"\"!\n Init all key buttons interrupt events or query mode. \n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == 'INT':\n try:\n self._myKey.configKeyButtons(enableButtons=[{'id':\n CONFIG_KEY.BUTTON_ACT_A, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {\n 'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},\n {'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.\n _callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)\n except:\n pass\n if mode.upper() == 'QUERY':\n self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,\n 'callback': None}])\n\n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.\n BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.\n BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])\n\n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton(keyBtn) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton(keyBtn) else 1\n return 0\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n 
GPIO.cleanup()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CONFIG_KEY:\n BUTTON_ACT_A = 22\n BUTTON_ACT_B = 23\n BUTTON_JOY_LEFT = 26\n BUTTON_JOY_RIGHT = 27\n BUTTON_JOY_UP = 5\n BUTTON_JOY_DOWN = 6\n BUTTON_JOY_OK = 24\n\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print('UP:\\t{}\\n'.format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode='INT'):\n \"\"\"!\n Init all key buttons interrupt events or query mode. \n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == 'INT':\n try:\n self._myKey.configKeyButtons(enableButtons=[{'id':\n CONFIG_KEY.BUTTON_ACT_A, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {\n 'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},\n {'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.\n _callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)\n except:\n pass\n if mode.upper() == 'QUERY':\n self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,\n 'callback': None}])\n\n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.\n BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.\n BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])\n\n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton(keyBtn) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton(keyBtn) else 1\n return 0\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n 
< JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n demo().run()\n print('Key buttons demo is end.')\n",
"step-4": "from time import sleep\nimport RPi.GPIO as GPIO\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL\n\n\nclass CONFIG_KEY:\n BUTTON_ACT_A = 22\n BUTTON_ACT_B = 23\n BUTTON_JOY_LEFT = 26\n BUTTON_JOY_RIGHT = 27\n BUTTON_JOY_UP = 5\n BUTTON_JOY_DOWN = 6\n BUTTON_JOY_OK = 24\n\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A:\n return 'BUTTON_A'\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B:\n return 'BUTTON_B'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP:\n return 'JOY_UP'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN:\n return 'JOY_DOWN'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT:\n return 'JOY_RIGHT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT:\n return 'JOY_LEFT'\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK:\n return 'JOY_CENTER'\n return 'UNKNOW'\n\n def onKeyButtonDown(self, channel):\n print('DOWN:\\t{}'.format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print('UP:\\t{}\\n'.format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode='INT'):\n \"\"\"!\n Init all key buttons interrupt events or query mode. \n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == 'INT':\n try:\n self._myKey.configKeyButtons(enableButtons=[{'id':\n CONFIG_KEY.BUTTON_ACT_A, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_UP, 'callback': self._callbackKeyButton}, {\n 'id': CONFIG_KEY.BUTTON_JOY_DOWN, 'callback': self.\n _callbackKeyButton}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': self._callbackKeyButton}, {'id': CONFIG_KEY\n .BUTTON_JOY_RIGHT, 'callback': self._callbackKeyButton},\n {'id': CONFIG_KEY.BUTTON_JOY_OK, 'callback': self.\n _callbackKeyButton}], bounceTime=DEF_BOUNCE_TIME_SHORT_MON)\n except:\n pass\n if mode.upper() == 'QUERY':\n self._myKey.configKeyButtons([{'id': CONFIG_KEY.BUTTON_ACT_A,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_ACT_B,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_OK,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_UP,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_DOWN,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_LEFT,\n 'callback': None}, {'id': CONFIG_KEY.BUTTON_JOY_RIGHT,\n 'callback': None}])\n\n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B, CONFIG_KEY.BUTTON_JOY_UP, CONFIG_KEY.\n BUTTON_JOY_DOWN, CONFIG_KEY.BUTTON_JOY_LEFT, CONFIG_KEY.\n BUTTON_JOY_RIGHT, CONFIG_KEY.BUTTON_JOY_OK])\n\n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton(keyBtn) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton(keyBtn) else 1\n return 0\n\n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy 
UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\n '\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n'\n )\n self.initKeyButtons('INT')\n while True:\n if self.readExitButtonStatus():\n break\n pass\n self.releaseKeyButtons()\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n demo().run()\n print('Key buttons demo is end.')\n",
"step-5": "# -*- coding: utf-8 -*-\n#\n# RPi.Spark KeyButton Demo\n#\n# Author: Kunpeng Zhang\n# 2018.6.6\n#\n# See LICENSE for details.\n\nfrom time import sleep\nimport RPi.GPIO as GPIO\n\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON\nfrom JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL\n\n########################################################################\n# Key buttons include Joystick buttons and Action buttons, \n# use BCM mode, there are keyboard layout:\n# \n# [JOY UP] \n# [JOY LEFT] [JOY RIGHT] [ACT_A] [ACT_B]\n# [JOY DOWN] \n#\nclass CONFIG_KEY:\n # Action Buttons BCM_IO_NUM\n BUTTON_ACT_A = 22\n BUTTON_ACT_B = 23\n \n # Joy Buttons BCM_IO_NUM\n BUTTON_JOY_LEFT = 26\n BUTTON_JOY_RIGHT = 27\n BUTTON_JOY_UP = 5\n BUTTON_JOY_DOWN = 6\n BUTTON_JOY_OK = 24\n\nclass demo:\n _myKey = None\n\n def __init__(self):\n self._myKey = RPiKeyButtons()\n\n def _getKeyButtonName(self, keyBtn):\n if keyBtn == CONFIG_KEY.BUTTON_ACT_A: return \"BUTTON_A\"\n if keyBtn == CONFIG_KEY.BUTTON_ACT_B: return \"BUTTON_B\"\n \n if keyBtn == CONFIG_KEY.BUTTON_JOY_UP: return \"JOY_UP\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN: return \"JOY_DOWN\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT: return \"JOY_RIGHT\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT: return \"JOY_LEFT\"\n if keyBtn == CONFIG_KEY.BUTTON_JOY_OK: return \"JOY_CENTER\"\n return \"UNKNOW\"\n\n def onKeyButtonDown(self, channel):\n print(\"DOWN:\\t{}\".format(self._getKeyButtonName(channel)))\n pass\n\n def onKeyButtonUp(self, channel):\n print(\"UP:\\t{}\\n\".format(self._getKeyButtonName(channel)))\n pass\n\n def _callbackKeyButton(self, channel):\n \"\"\"!\n Key button interrupt event callback function\n Inherit this method to implement your want\n \"\"\"\n if self._myKey.readKeyButton(channel) == 0:\n self.onKeyButtonDown(channel)\n return\n\n if self._myKey.readKeyButton(channel) == 1:\n self.onKeyButtonUp(channel)\n return\n\n def initKeyButtons(self, mode = \"INT\"):\n \"\"\"!\n Init all key buttons interrupt events or query mode. 
\n Inherit the onKeyButtonDown and onKeyButtonUp to implement your want\n\n @param mode: Can be { \"INT\" | \"QUERY\" }, default is \"INT\" \n \"\"\"\n if mode.upper() == \"INT\":\n try:\n self._myKey.configKeyButtons(\n enableButtons = [\n {\"id\":CONFIG_KEY.BUTTON_ACT_A, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_ACT_B, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_UP, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_DOWN, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_LEFT, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_RIGHT, \"callback\":self._callbackKeyButton},\n {\"id\":CONFIG_KEY.BUTTON_JOY_OK, \"callback\":self._callbackKeyButton}\n ],\n bounceTime = DEF_BOUNCE_TIME_SHORT_MON )\n except:\n pass\n\n if mode.upper() == \"QUERY\":\n self._myKey.configKeyButtons([\n {\"id\":CONFIG_KEY.BUTTON_ACT_A, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_ACT_B, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_OK, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_UP, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_DOWN, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_LEFT, \"callback\":None},\n {\"id\":CONFIG_KEY.BUTTON_JOY_RIGHT, \"callback\":None}\n ])\n \n def releaseKeyButtons(self):\n \"\"\"!\n Release all key button events\n \"\"\"\n self._myKey.removeKeyButtonEvent([\n CONFIG_KEY.BUTTON_ACT_A,\n CONFIG_KEY.BUTTON_ACT_B,\n CONFIG_KEY.BUTTON_JOY_UP,\n CONFIG_KEY.BUTTON_JOY_DOWN,\n CONFIG_KEY.BUTTON_JOY_LEFT,\n CONFIG_KEY.BUTTON_JOY_RIGHT,\n CONFIG_KEY.BUTTON_JOY_OK\n ])\n \n def readKeyButton(self, keyBtn):\n \"\"\"!\n Read key button status, return 0 / 1\n \"\"\"\n if self._myKey.readKeyButton( keyBtn ) == 0:\n sleep(0.02)\n return 0 if self._myKey.readKeyButton( keyBtn ) else 1\n return 0\n \n def readExitButtonStatus(self):\n \"\"\"!\n Read Exit action ( button A and Joy UP press down same time )\n \"\"\"\n pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)\n pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)\n return pressA and pressUp\n\n def run(self):\n print(\"\\nPress any key button to test ...\\n < JOY UP + Button A to Exit >\\n\\n\")\n self.initKeyButtons(\"INT\")\n\n while True:\n if self.readExitButtonStatus(): break\n pass\n\n self.releaseKeyButtons()\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n demo().run()\n print(\"Key buttons demo is end.\")",
"step-ids": [
6,
12,
15,
16,
17
]
}
|
[
6,
12,
15,
16,
17
] |
# Copyright (C) 2019 Catalyst Cloud Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from logging import getLogger
from confspirator import groups
from confspirator import fields
from adjutant import actions as adj_actions
from adjutant.api.models import Task
from adjutant.config import CONF
from django.utils import timezone
from adjutant.notifications.utils import create_notification
from adjutant.tasks.v1.utils import send_stage_email, create_token, handle_task_error
from adjutant import exceptions
def make_task_config(task_class):
config_group = groups.DynamicNameConfigGroup()
config_group.register_child_config(
fields.BoolConfig(
"allow_auto_approve",
help_text="Override if this task allows auto_approval. "
"Otherwise uses task default.",
default=task_class.allow_auto_approve,
)
)
config_group.register_child_config(
fields.ListConfig(
"additional_actions",
help_text="Additional actions to be run as part of the task "
"after default actions.",
default=task_class.additional_actions or [],
)
)
config_group.register_child_config(
fields.IntConfig(
"token_expiry",
help_text="Override for the task token expiry. "
"Otherwise uses task default.",
default=task_class.token_expiry,
)
)
config_group.register_child_config(
fields.DictConfig(
"actions",
help_text="Action config overrides over the action defaults. "
"See 'adjutant.workflow.action_defaults'.",
is_json=True,
default=task_class.action_config or {},
sample_default={
"SomeCustomAction": {"some_action_setting": "<a-uuid-probably>"}
},
)
)
config_group.register_child_config(
fields.DictConfig(
"emails",
help_text="Email config overrides for this task over task defaults."
"See 'adjutant.workflow.emails'.",
is_json=True,
default=task_class.email_config or {},
sample_default={
"initial": None,
"token": {
"subject": "Some custom subject",
},
},
)
)
config_group.register_child_config(
fields.DictConfig(
"notifications",
help_text="Notification config overrides for this task over task defaults."
"See 'adjutant.workflow.notifications'.",
is_json=True,
default=task_class.notification_config or {},
sample_default={
"standard_handlers": ["EmailNotification"],
"error_handlers": ["EmailNotification"],
"standard_handler_config": {
"EmailNotification": {
"emails": ["[email protected]"],
"reply": "[email protected]",
}
},
"error_handler_config": {
"EmailNotification": {
"emails": ["[email protected]"],
"reply": "[email protected]",
}
},
},
)
)
return config_group
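# Illustrative sketch (assumption, not from the Adjutant source tree): the class
# attributes that make_task_config() reads can be seen with a hypothetical task
# class; "ExampleTask" and "SomeCustomAction" are made-up names for illustration.
#
#     class ExampleTask(BaseTask):
#         task_type = "example_task"
#         default_actions = ["SomeCustomAction"]
#         allow_auto_approve = False   # becomes the 'allow_auto_approve' default
#         token_expiry = 3600          # becomes the 'token_expiry' default
#
#     example_config = make_task_config(ExampleTask)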
class BaseTask(object):
"""
Base class for in memory task representation.
This serves as the internal task logic handler, and is used to
define what a task looks like.
Most of the time this class shouldn't be called or used directly
as the task manager is what handles the direct interaction to the
logic here, and includes some wrapper logic to help deal with workflows.
"""
# required values in custom task
task_type = None
default_actions = None
# default values to optionally override in task definition
deprecated_task_types = None
duplicate_policy = "cancel"
send_approval_notification = True
token_requires_authentication = False
# config defaults for the task (used to generate default config):
allow_auto_approve = True
additional_actions = None
token_expiry = None
action_config = None
email_config = None
notification_config = None
def __init__(self, task_model=None, task_data=None, action_data=None):
self._config = None
self.logger = getLogger("adjutant")
if task_model:
self.task = task_model
self._refresh_actions()
else:
# raises 400 validation error
action_serializer_list = self._instantiate_action_serializers(action_data)
hash_key = self._create_task_hash(action_serializer_list)
# raises duplicate error
self._handle_duplicates(hash_key)
keystone_user = task_data.get("keystone_user", {})
self.task = Task.objects.create(
keystone_user=keystone_user,
project_id=keystone_user.get("project_id"),
task_type=self.task_type,
hash_key=hash_key,
)
self.task.save()
# Instantiate actions with serializers
self.actions = []
for i, action in enumerate(action_serializer_list):
data = action["serializer"].validated_data
# construct the action class
self.actions.append(
action["action"](data=data, task=self.task, order=i)
)
self.logger.info(
"(%s) - '%s' task created (%s)."
% (timezone.now(), self.task_type, self.task.uuid)
)
def _instantiate_action_serializers(self, action_data, use_existing_actions=False):
action_serializer_list = []
if use_existing_actions:
actions = self.actions
else:
actions = self.default_actions[:]
actions += self.config.additional_actions
# instantiate all action serializers and check validity
valid = True
for action in actions:
if use_existing_actions:
action_name = action.action.action_name
else:
action_name = action
action_class = adj_actions.ACTION_CLASSES[action_name]
if use_existing_actions:
action_class = action
# instantiate serializer class
if not action_class.serializer:
raise exceptions.SerializerMissingException(
"No serializer defined for action %s" % action_name
)
serializer = action_class.serializer(data=action_data)
action_serializer_list.append(
{"name": action_name, "action": action_class, "serializer": serializer}
)
if serializer and not serializer.is_valid():
valid = False
if not valid:
errors = {}
for action in action_serializer_list:
if action["serializer"]:
errors.update(action["serializer"].errors)
raise exceptions.TaskSerializersInvalid(errors)
return action_serializer_list
def _create_task_hash(self, action_list):
hashable_list = [
self.task_type,
]
for action in action_list:
hashable_list.append(action["name"])
if not action["serializer"]:
continue
# iterate like this to maintain consistent order for hash
fields = sorted(action["serializer"].validated_data.keys())
for field in fields:
try:
hashable_list.append(action["serializer"].validated_data[field])
except KeyError:
if field == "username" and CONF.identity.username_is_email:
continue
else:
raise
return hashlib.sha256(str(hashable_list).encode("utf-8")).hexdigest()
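    # A minimal illustration, assuming a hypothetical payload: the hash input
    # is the task type plus each action's name and its sorted validated
    # fields, e.g. roughly ["example_task", "SomeAction", "project_x",
    # "user@example.com"], so identical submissions yield the same hash_key
    # and can be spotted as duplicates.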
def _handle_duplicates(self, hash_key):
duplicate_tasks = Task.objects.filter(
hash_key=hash_key, completed=0, cancelled=0
)
if not duplicate_tasks:
return
if self.duplicate_policy == "cancel":
now = timezone.now()
self.logger.info("(%s) - Task is a duplicate - Cancelling old tasks." % now)
for task in duplicate_tasks:
                task.add_task_note(
                    "Task cancelled because it was an old duplicate. - (%s)" % now
                )
task.get_task().cancel()
return
raise exceptions.TaskDuplicateFound()
def _refresh_actions(self):
self.actions = [a.get_action() for a in self.task.actions]
def _create_token(self):
self.clear_tokens()
token_expiry = self.config.token_expiry or self.token_expiry
token = create_token(self.task, token_expiry)
self.add_note("Token created for task.")
try:
# will throw a key error if the token template has not
# been specified
email_conf = self.config.emails.token
send_stage_email(self.task, email_conf, token)
except KeyError as e:
handle_task_error(e, self.task, error_text="while sending token")
def add_note(self, note):
"""
Logs the note, and also adds it to the task notes.
"""
now = timezone.now()
self.logger.info(
"(%s)(%s)(%s) - %s" % (now, self.task_type, self.task.uuid, note)
)
note = "%s - (%s)" % (note, now)
self.task.add_task_note(note)
@property
def config(self):
"""Get my config.
Returns a dict of the config for this task.
"""
if self._config is None:
try:
task_conf = CONF.workflow.tasks[self.task_type]
except KeyError:
task_conf = {}
self._config = CONF.workflow.task_defaults.overlay(task_conf)
return self._config
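    # A minimal sketch of the lookup above: per-task settings overlay the
    # workflow-wide defaults, roughly
    #     task_conf = CONF.workflow.tasks.get(self.task_type, {})
    #     config = CONF.workflow.task_defaults.overlay(task_conf)
    # and the result is cached on self._config for the life of this object.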
def is_valid(self, internal_message=None):
self._refresh_actions()
valid = all([act.valid for act in self.actions])
if not valid:
# TODO(amelia): get action invalidation reasons and raise those
raise exceptions.TaskActionsInvalid(
self.task, "actions invalid", internal_message
)
@property
def approved(self):
return self.task.approved
@property
def completed(self):
return self.task.completed
@property
def cancelled(self):
return self.task.cancelled
def confirm_state(self, approved=None, completed=None, cancelled=None):
"""Check that the Task is in a given state.
        A None value means that state is ignored. Otherwise expects True or False.
"""
if completed is not None:
if self.task.completed and not completed:
raise exceptions.TaskStateInvalid(
self.task, "This task has already been completed."
)
if not self.task.completed and completed:
raise exceptions.TaskStateInvalid(
self.task, "This task hasn't been completed."
)
if cancelled is not None:
if self.task.cancelled and not cancelled:
raise exceptions.TaskStateInvalid(
self.task, "This task has been cancelled."
)
if not self.task.cancelled and cancelled:
raise exceptions.TaskStateInvalid(
self.task, "This task has not been cancelled."
)
if approved is not None:
if self.task.approved and not approved:
raise exceptions.TaskStateInvalid(
self.task, "This task has already been approved."
)
if not self.task.approved and approved:
raise exceptions.TaskStateInvalid(
self.task, "This task has not been approved."
)
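    # Usage sketch: confirm_state guards every public transition, e.g. update()
    # and prepare() require (approved=False, completed=False, cancelled=False)
    # while submit() requires approved=True, so an out-of-order call raises
    # TaskStateInvalid before any action code runs.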
def update(self, action_data):
self.confirm_state(approved=False, completed=False, cancelled=False)
action_serializer_list = self._instantiate_action_serializers(
action_data, use_existing_actions=True
)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
for action in action_serializer_list:
data = action["serializer"].validated_data
action["action"].action.action_data = data
action["action"].action.save()
self._refresh_actions()
self.prepare()
def prepare(self):
"""Run the prepare stage for all the actions.
If the task can be auto approved, this will also run the approve
stage.
"""
self.confirm_state(approved=False, completed=False, cancelled=False)
for action in self.actions:
try:
action.prepare()
except Exception as e:
handle_task_error(e, self.task, error_text="while setting up task")
# send initial confirmation email:
email_conf = self.config.emails.initial
send_stage_email(self.task, email_conf)
approve_list = [act.auto_approve for act in self.actions]
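        # Decision rule sketched below: the task can auto approve only when at
        # least one action opts in (True) and no action vetoes it (False); an
        # empty approve_list therefore never auto approves.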
# TODO(amelia): It would be nice to explicitly test this, however
# currently we don't have the right combinations of
# actions to allow for it.
if False in approve_list:
can_auto_approve = False
elif True in approve_list:
can_auto_approve = True
else:
can_auto_approve = False
if self.config.allow_auto_approve is not None:
allow_auto_approve = self.config.allow_auto_approve
else:
allow_auto_approve = self.allow_auto_approve
if can_auto_approve and not allow_auto_approve:
            self.add_note("Actions allow auto approval, but task does not.")
        elif can_auto_approve:
            self.add_note("Actions allow auto approval. Auto approving.")
self.approve()
return
if self.send_approval_notification:
notes = {"notes": ["'%s' task needs approval." % self.task_type]}
create_notification(self.task, notes)
def approve(self, approved_by="system"):
"""Run the approve stage for all the actions."""
self.confirm_state(completed=False, cancelled=False)
self.is_valid("task invalid before approval")
# We approve the task before running actions,
# that way if something goes wrong we know if it was approved,
# when it was approved, and who approved it.
self.task.approved = True
self.task.approved_on = timezone.now()
self.task.approved_by = approved_by
self.task.save()
# approve all actions
for action in self.actions:
try:
action.approve()
except Exception as e:
handle_task_error(e, self.task, error_text="while approving task")
self.is_valid("task invalid after approval")
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
else:
self.submit()
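    # Flow note for approve(): if any action reports need_token, a fresh token
    # is created and emailed and the workflow pauses until submit() is called
    # with that token's data; otherwise the task submits immediately.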
def reissue_token(self):
self.confirm_state(approved=True, completed=False, cancelled=False)
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
def clear_tokens(self):
for token in self.task.tokens:
token.delete()
def submit(self, token_data=None, keystone_user=None):
self.confirm_state(approved=True, completed=False, cancelled=False)
required_fields = set()
actions = []
for action in self.task.actions:
a = action.get_action()
actions.append(a)
for field in a.token_fields:
required_fields.add(field)
if not token_data:
token_data = {}
errors = {}
data = {}
for field in required_fields:
try:
data[field] = token_data[field]
except KeyError:
errors[field] = [
"This field is required.",
]
except TypeError:
                errors = ["Improperly formatted json. Should be a key-value object."]
break
if errors:
raise exceptions.TaskTokenSerializersInvalid(self.task, errors)
self.is_valid("task invalid before submit")
for action in actions:
try:
action.submit(data, keystone_user)
except Exception as e:
                handle_task_error(e, self.task, "while submitting task")
self.is_valid("task invalid after submit")
self.task.completed = True
self.task.completed_on = timezone.now()
self.task.save()
for token in self.task.tokens:
token.delete()
# Sending confirmation email:
email_conf = self.config.emails.completed
send_stage_email(self.task, email_conf)
def cancel(self):
self.confirm_state(completed=False, cancelled=False)
self.clear_tokens()
self.task.cancelled = True
self.task.save()
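# A minimal lifecycle sketch, assuming a hypothetical ExampleTask subclass and
# a configured Adjutant/Django environment; names and arguments here are
# illustrative assumptions, not part of the public API.
#
#     task = ExampleTask(task_data=task_data, action_data=payload)
#     task.prepare()              # runs prepare on all actions, may auto-approve
#     task.approve("admin")       # explicit approval when not auto-approved
#     task.submit(token_data)     # only needed when an action requires a token
#     task.cancel()               # or abandon the task at any earlier stage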
|
normal
|
{
"blob_id": "cc23eeed44ff66d68c700163cca8b9f4986d497d",
"index": 7681,
"step-1": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n <mask token>\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. - (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n <mask token>\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n <mask token>\n <mask token>\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. 
Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n <mask token>\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n <mask token>\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n <mask token>\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. 
- (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n <mask token>\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n <mask token>\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except 
Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n <mask token>\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. 
- (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n\n @property\n def completed(self):\n return self.task.completed\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n <mask token>\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in 
self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n\n def _instantiate_action_serializers(self, action_data,\n use_existing_actions=False):\n action_serializer_list = []\n if use_existing_actions:\n actions = self.actions\n else:\n actions = self.default_actions[:]\n actions += self.config.additional_actions\n valid = True\n for action in actions:\n if use_existing_actions:\n action_name = action.action.action_name\n else:\n action_name = action\n action_class = adj_actions.ACTION_CLASSES[action_name]\n if use_existing_actions:\n action_class = action\n if not action_class.serializer:\n raise exceptions.SerializerMissingException(\n 'No serializer defined for action %s' % action_name)\n serializer = action_class.serializer(data=action_data)\n action_serializer_list.append({'name': action_name, 'action':\n action_class, 'serializer': serializer})\n if serializer and not serializer.is_valid():\n valid = False\n if not valid:\n errors = {}\n for action in action_serializer_list:\n if action['serializer']:\n errors.update(action['serializer'].errors)\n raise exceptions.TaskSerializersInvalid(errors)\n return action_serializer_list\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. 
- (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n\n @property\n def completed(self):\n return self.task.completed\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n\n def prepare(self):\n \"\"\"Run the prepare stage for all the actions.\n\n If the task can be auto approved, this will also run the approve\n stage.\n \"\"\"\n self.confirm_state(approved=False, completed=False, cancelled=False)\n for action in self.actions:\n try:\n action.prepare()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while 
setting up task')\n email_conf = self.config.emails.initial\n send_stage_email(self.task, email_conf)\n approve_list = [act.auto_approve for act in self.actions]\n if False in approve_list:\n can_auto_approve = False\n elif True in approve_list:\n can_auto_approve = True\n else:\n can_auto_approve = False\n if self.config.allow_auto_approve is not None:\n allow_auto_approve = self.config.allow_auto_approve\n else:\n allow_auto_approve = self.allow_auto_approve\n if can_auto_approve and not allow_auto_approve:\n self.add_note('Actions allow auto aproval, but task does not.')\n elif can_auto_approve:\n self.add_note('Action allow auto approval. Auto approving.')\n self.approve()\n return\n if self.send_approval_notification:\n notes = {'notes': [\"'%s' task needs approval.\" % self.task_type]}\n create_notification(self.task, notes)\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n",
"step-5": "# Copyright (C) 2019 Catalyst Cloud Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport hashlib\nfrom logging import getLogger\n\nfrom confspirator import groups\nfrom confspirator import fields\n\nfrom adjutant import actions as adj_actions\nfrom adjutant.api.models import Task\nfrom adjutant.config import CONF\nfrom django.utils import timezone\nfrom adjutant.notifications.utils import create_notification\nfrom adjutant.tasks.v1.utils import send_stage_email, create_token, handle_task_error\nfrom adjutant import exceptions\n\n\ndef make_task_config(task_class):\n config_group = groups.DynamicNameConfigGroup()\n config_group.register_child_config(\n fields.BoolConfig(\n \"allow_auto_approve\",\n help_text=\"Override if this task allows auto_approval. \"\n \"Otherwise uses task default.\",\n default=task_class.allow_auto_approve,\n )\n )\n config_group.register_child_config(\n fields.ListConfig(\n \"additional_actions\",\n help_text=\"Additional actions to be run as part of the task \"\n \"after default actions.\",\n default=task_class.additional_actions or [],\n )\n )\n config_group.register_child_config(\n fields.IntConfig(\n \"token_expiry\",\n help_text=\"Override for the task token expiry. \"\n \"Otherwise uses task default.\",\n default=task_class.token_expiry,\n )\n )\n config_group.register_child_config(\n fields.DictConfig(\n \"actions\",\n help_text=\"Action config overrides over the action defaults. 
\"\n \"See 'adjutant.workflow.action_defaults'.\",\n is_json=True,\n default=task_class.action_config or {},\n sample_default={\n \"SomeCustomAction\": {\"some_action_setting\": \"<a-uuid-probably>\"}\n },\n )\n )\n config_group.register_child_config(\n fields.DictConfig(\n \"emails\",\n help_text=\"Email config overrides for this task over task defaults.\"\n \"See 'adjutant.workflow.emails'.\",\n is_json=True,\n default=task_class.email_config or {},\n sample_default={\n \"initial\": None,\n \"token\": {\n \"subject\": \"Some custom subject\",\n },\n },\n )\n )\n config_group.register_child_config(\n fields.DictConfig(\n \"notifications\",\n help_text=\"Notification config overrides for this task over task defaults.\"\n \"See 'adjutant.workflow.notifications'.\",\n is_json=True,\n default=task_class.notification_config or {},\n sample_default={\n \"standard_handlers\": [\"EmailNotification\"],\n \"error_handlers\": [\"EmailNotification\"],\n \"standard_handler_config\": {\n \"EmailNotification\": {\n \"emails\": [\"[email protected]\"],\n \"reply\": \"[email protected]\",\n }\n },\n \"error_handler_config\": {\n \"EmailNotification\": {\n \"emails\": [\"[email protected]\"],\n \"reply\": \"[email protected]\",\n }\n },\n },\n )\n )\n return config_group\n\n\nclass BaseTask(object):\n \"\"\"\n Base class for in memory task representation.\n\n This serves as the internal task logic handler, and is used to\n define what a task looks like.\n\n Most of the time this class shouldn't be called or used directly\n as the task manager is what handles the direct interaction to the\n logic here, and includes some wrapper logic to help deal with workflows.\n \"\"\"\n\n # required values in custom task\n task_type = None\n default_actions = None\n\n # default values to optionally override in task definition\n deprecated_task_types = None\n duplicate_policy = \"cancel\"\n send_approval_notification = True\n token_requires_authentication = False\n\n # config defaults for the task (used to generate default config):\n allow_auto_approve = True\n additional_actions = None\n token_expiry = None\n action_config = None\n email_config = None\n notification_config = None\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger(\"adjutant\")\n\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n # raises 400 validation error\n action_serializer_list = self._instantiate_action_serializers(action_data)\n\n hash_key = self._create_task_hash(action_serializer_list)\n # raises duplicate error\n self._handle_duplicates(hash_key)\n\n keystone_user = task_data.get(\"keystone_user\", {})\n self.task = Task.objects.create(\n keystone_user=keystone_user,\n project_id=keystone_user.get(\"project_id\"),\n task_type=self.task_type,\n hash_key=hash_key,\n )\n self.task.save()\n\n # Instantiate actions with serializers\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action[\"serializer\"].validated_data\n\n # construct the action class\n self.actions.append(\n action[\"action\"](data=data, task=self.task, order=i)\n )\n self.logger.info(\n \"(%s) - '%s' task created (%s).\"\n % (timezone.now(), self.task_type, self.task.uuid)\n )\n\n def _instantiate_action_serializers(self, action_data, use_existing_actions=False):\n action_serializer_list = []\n\n if use_existing_actions:\n actions = self.actions\n else:\n actions = self.default_actions[:]\n actions += self.config.additional_actions\n\n # instantiate 
all action serializers and check validity\n valid = True\n for action in actions:\n if use_existing_actions:\n action_name = action.action.action_name\n else:\n action_name = action\n\n action_class = adj_actions.ACTION_CLASSES[action_name]\n\n if use_existing_actions:\n action_class = action\n\n # instantiate serializer class\n if not action_class.serializer:\n raise exceptions.SerializerMissingException(\n \"No serializer defined for action %s\" % action_name\n )\n serializer = action_class.serializer(data=action_data)\n\n action_serializer_list.append(\n {\"name\": action_name, \"action\": action_class, \"serializer\": serializer}\n )\n\n if serializer and not serializer.is_valid():\n valid = False\n\n if not valid:\n errors = {}\n for action in action_serializer_list:\n if action[\"serializer\"]:\n errors.update(action[\"serializer\"].errors)\n raise exceptions.TaskSerializersInvalid(errors)\n\n return action_serializer_list\n\n def _create_task_hash(self, action_list):\n hashable_list = [\n self.task_type,\n ]\n\n for action in action_list:\n hashable_list.append(action[\"name\"])\n if not action[\"serializer\"]:\n continue\n # iterate like this to maintain consistent order for hash\n fields = sorted(action[\"serializer\"].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action[\"serializer\"].validated_data[field])\n except KeyError:\n if field == \"username\" and CONF.identity.username_is_email:\n continue\n else:\n raise\n\n return hashlib.sha256(str(hashable_list).encode(\"utf-8\")).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(\n hash_key=hash_key, completed=0, cancelled=0\n )\n\n if not duplicate_tasks:\n return\n\n if self.duplicate_policy == \"cancel\":\n now = timezone.now()\n self.logger.info(\"(%s) - Task is a duplicate - Cancelling old tasks.\" % now)\n for task in duplicate_tasks:\n task.add_task_note(\n \"Task cancelled because was an old duplicate. 
- (%s)\" % now\n )\n task.get_task().cancel()\n return\n\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note(\"Token created for task.\")\n try:\n # will throw a key error if the token template has not\n # been specified\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text=\"while sending token\")\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info(\n \"(%s)(%s)(%s) - %s\" % (now, self.task_type, self.task.uuid, note)\n )\n note = \"%s - (%s)\" % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n # TODO(amelia): get action invalidation reasons and raise those\n raise exceptions.TaskActionsInvalid(\n self.task, \"actions invalid\", internal_message\n )\n\n @property\n def approved(self):\n return self.task.approved\n\n @property\n def completed(self):\n return self.task.completed\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. 
Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has already been completed.\"\n )\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task hasn't been completed.\"\n )\n\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has been cancelled.\"\n )\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has not been cancelled.\"\n )\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has already been approved.\"\n )\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has not been approved.\"\n )\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True\n )\n\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n\n for action in action_serializer_list:\n data = action[\"serializer\"].validated_data\n\n action[\"action\"].action.action_data = data\n action[\"action\"].action.save()\n self._refresh_actions()\n self.prepare()\n\n def prepare(self):\n \"\"\"Run the prepare stage for all the actions.\n\n If the task can be auto approved, this will also run the approve\n stage.\n \"\"\"\n\n self.confirm_state(approved=False, completed=False, cancelled=False)\n\n for action in self.actions:\n try:\n action.prepare()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while setting up task\")\n\n # send initial confirmation email:\n email_conf = self.config.emails.initial\n send_stage_email(self.task, email_conf)\n\n approve_list = [act.auto_approve for act in self.actions]\n\n # TODO(amelia): It would be nice to explicitly test this, however\n # currently we don't have the right combinations of\n # actions to allow for it.\n if False in approve_list:\n can_auto_approve = False\n elif True in approve_list:\n can_auto_approve = True\n else:\n can_auto_approve = False\n\n if self.config.allow_auto_approve is not None:\n allow_auto_approve = self.config.allow_auto_approve\n else:\n allow_auto_approve = self.allow_auto_approve\n\n if can_auto_approve and not allow_auto_approve:\n self.add_note(\"Actions allow auto aproval, but task does not.\")\n elif can_auto_approve:\n self.add_note(\"Action allow auto approval. 
Auto approving.\")\n self.approve()\n return\n\n if self.send_approval_notification:\n notes = {\"notes\": [\"'%s' task needs approval.\" % self.task_type]}\n create_notification(self.task, notes)\n\n def approve(self, approved_by=\"system\"):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n\n self.confirm_state(completed=False, cancelled=False)\n\n self.is_valid(\"task invalid before approval\")\n\n # We approve the task before running actions,\n # that way if something goes wrong we know if it was approved,\n # when it was approved, and who approved it.\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n\n # approve all actions\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while approving task\")\n\n self.is_valid(\"task invalid after approval\")\n\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n\n if not token_data:\n token_data = {}\n\n errors = {}\n data = {}\n\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = [\n \"This field is required.\",\n ]\n except TypeError:\n errors = [\"Improperly formated json. \" \"Should be a key-value object.\"]\n break\n\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n\n self.is_valid(\"task invalid before submit\")\n\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, \"while submiting task\")\n\n self.is_valid(\"task invalid after submit\")\n\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n\n # Sending confirmation email:\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n\n def cancel(self):\n self.confirm_state(completed=False, cancelled=False)\n self.clear_tokens()\n self.task.cancelled = True\n self.task.save()\n",
"step-ids": [
14,
17,
18,
20,
26
]
}
|
[
14,
17,
18,
20,
26
] |
def count_words(word):
count = 0
count = len(word.split())
return count
if __name__ == '__main__':
print count_words("Boj is dope")
|
normal
|
{
"blob_id": "9f3b7d6dbf57157b5ebd6ad72f46befc94798a5f",
"index": 3845,
"step-1": "def count_words(word):\n\tcount = 0\n\tcount = len(word.split())\n\treturn count\n\n\nif __name__ == '__main__':\n\tprint count_words(\"Boj is dope\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class TubeloadResolver(ResolveUrl):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template=
'https://{host}/e/{media_id}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TubeloadResolver(ResolveUrl):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
rurl = 'https://{}/'.format(host)
headers = {'Referer': rurl, 'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
if 'NOT FOUND' in html or 'Sorry' in html:
raise ResolverError('File Removed')
if jsunhunt.detect(html):
html = re.findall('<head>(.*?)</head>', html, re.S)[0]
html = jsunhunt.unhunt(html)
source = re.search('var\\s*adbbdddffbad\\s*=\\s*"([^"]+)', html)
if source:
headers.update({'Origin': rurl[:-1], 'verifypeer': 'false'})
url = source.group(1).replace(
'MzY3Y2E4NTAzNmQ5NDkzN2FiNTQzZTBiNmI4YTIwYzg', '')
url = url.replace('NjYxOWU2OTNmZWQ0M2I3ZTFhM2U4NTc4Y2NhZmY3NmM=',
'')
url = base64.b64decode(url).decode('utf-8')
return url + helpers.append_headers(headers)
raise ResolverError('File Not Found')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template=
'https://{host}/e/{media_id}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TubeloadResolver(ResolveUrl):
name = 'tubeload'
domains = ['tubeload.co']
pattern = '(?://|\\.)(tubeload\\.co)/(?:embed|e|f)/([0-9a-zA-Z]+)'
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
rurl = 'https://{}/'.format(host)
headers = {'Referer': rurl, 'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
if 'NOT FOUND' in html or 'Sorry' in html:
raise ResolverError('File Removed')
if jsunhunt.detect(html):
html = re.findall('<head>(.*?)</head>', html, re.S)[0]
html = jsunhunt.unhunt(html)
source = re.search('var\\s*adbbdddffbad\\s*=\\s*"([^"]+)', html)
if source:
headers.update({'Origin': rurl[:-1], 'verifypeer': 'false'})
url = source.group(1).replace(
'MzY3Y2E4NTAzNmQ5NDkzN2FiNTQzZTBiNmI4YTIwYzg', '')
url = url.replace('NjYxOWU2OTNmZWQ0M2I3ZTFhM2U4NTc4Y2NhZmY3NmM=',
'')
url = base64.b64decode(url).decode('utf-8')
return url + helpers.append_headers(headers)
raise ResolverError('File Not Found')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template=
'https://{host}/e/{media_id}')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import re
import base64
from resolveurl import common
from resolveurl.plugins.lib import helpers, jsunhunt
from resolveurl.resolver import ResolveUrl, ResolverError
class TubeloadResolver(ResolveUrl):
name = 'tubeload'
domains = ['tubeload.co']
pattern = '(?://|\\.)(tubeload\\.co)/(?:embed|e|f)/([0-9a-zA-Z]+)'
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
rurl = 'https://{}/'.format(host)
headers = {'Referer': rurl, 'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
if 'NOT FOUND' in html or 'Sorry' in html:
raise ResolverError('File Removed')
if jsunhunt.detect(html):
html = re.findall('<head>(.*?)</head>', html, re.S)[0]
html = jsunhunt.unhunt(html)
source = re.search('var\\s*adbbdddffbad\\s*=\\s*"([^"]+)', html)
if source:
headers.update({'Origin': rurl[:-1], 'verifypeer': 'false'})
url = source.group(1).replace(
'MzY3Y2E4NTAzNmQ5NDkzN2FiNTQzZTBiNmI4YTIwYzg', '')
url = url.replace('NjYxOWU2OTNmZWQ0M2I3ZTFhM2U4NTc4Y2NhZmY3NmM=',
'')
url = base64.b64decode(url).decode('utf-8')
return url + helpers.append_headers(headers)
raise ResolverError('File Not Found')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template=
'https://{host}/e/{media_id}')
<|reserved_special_token_1|>
"""
Plugin for ResolveUrl
Copyright (C) 2022 shellc0de
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import base64
from resolveurl import common
from resolveurl.plugins.lib import helpers, jsunhunt
from resolveurl.resolver import ResolveUrl, ResolverError
class TubeloadResolver(ResolveUrl):
name = 'tubeload'
domains = ['tubeload.co']
pattern = r'(?://|\.)(tubeload\.co)/(?:embed|e|f)/([0-9a-zA-Z]+)'
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
rurl = 'https://{}/'.format(host)
headers = {
'Referer': rurl,
'User-Agent': common.FF_USER_AGENT
}
html = self.net.http_GET(web_url, headers=headers).content
if 'NOT FOUND' in html or 'Sorry' in html:
raise ResolverError('File Removed')
if jsunhunt.detect(html):
html = re.findall('<head>(.*?)</head>', html, re.S)[0]
html = jsunhunt.unhunt(html)
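        # Hedged note: the unpacked page appears to hold the stream URL base64-encoded in an
        # obfuscated variable; the two replace() calls below strip known junk substrings before decoding.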
source = re.search(r'var\s*adbbdddffbad\s*=\s*"([^"]+)', html)
if source:
headers.update({'Origin': rurl[:-1], 'verifypeer': 'false'})
url = source.group(1).replace('MzY3Y2E4NTAzNmQ5NDkzN2FiNTQzZTBiNmI4YTIwYzg', '')
url = url.replace('NjYxOWU2OTNmZWQ0M2I3ZTFhM2U4NTc4Y2NhZmY3NmM=', '')
url = base64.b64decode(url).decode('utf-8')
return url + helpers.append_headers(headers)
raise ResolverError('File Not Found')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://{host}/e/{media_id}')
|
flexible
|
{
"blob_id": "8dfea24545ec4bb95b66d4b5ff3c4936990eb73a",
"index": 9500,
"step-1": "<mask token>\n\n\nclass TubeloadResolver(ResolveUrl):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_url(self, host, media_id):\n return self._default_get_url(host, media_id, template=\n 'https://{host}/e/{media_id}')\n",
"step-2": "<mask token>\n\n\nclass TubeloadResolver(ResolveUrl):\n <mask token>\n <mask token>\n <mask token>\n\n def get_media_url(self, host, media_id):\n web_url = self.get_url(host, media_id)\n rurl = 'https://{}/'.format(host)\n headers = {'Referer': rurl, 'User-Agent': common.FF_USER_AGENT}\n html = self.net.http_GET(web_url, headers=headers).content\n if 'NOT FOUND' in html or 'Sorry' in html:\n raise ResolverError('File Removed')\n if jsunhunt.detect(html):\n html = re.findall('<head>(.*?)</head>', html, re.S)[0]\n html = jsunhunt.unhunt(html)\n source = re.search('var\\\\s*adbbdddffbad\\\\s*=\\\\s*\"([^\"]+)', html)\n if source:\n headers.update({'Origin': rurl[:-1], 'verifypeer': 'false'})\n url = source.group(1).replace(\n 'MzY3Y2E4NTAzNmQ5NDkzN2FiNTQzZTBiNmI4YTIwYzg', '')\n url = url.replace('NjYxOWU2OTNmZWQ0M2I3ZTFhM2U4NTc4Y2NhZmY3NmM=',\n '')\n url = base64.b64decode(url).decode('utf-8')\n return url + helpers.append_headers(headers)\n raise ResolverError('File Not Found')\n\n def get_url(self, host, media_id):\n return self._default_get_url(host, media_id, template=\n 'https://{host}/e/{media_id}')\n",
"step-3": "<mask token>\n\n\nclass TubeloadResolver(ResolveUrl):\n name = 'tubeload'\n domains = ['tubeload.co']\n pattern = '(?://|\\\\.)(tubeload\\\\.co)/(?:embed|e|f)/([0-9a-zA-Z]+)'\n\n def get_media_url(self, host, media_id):\n web_url = self.get_url(host, media_id)\n rurl = 'https://{}/'.format(host)\n headers = {'Referer': rurl, 'User-Agent': common.FF_USER_AGENT}\n html = self.net.http_GET(web_url, headers=headers).content\n if 'NOT FOUND' in html or 'Sorry' in html:\n raise ResolverError('File Removed')\n if jsunhunt.detect(html):\n html = re.findall('<head>(.*?)</head>', html, re.S)[0]\n html = jsunhunt.unhunt(html)\n source = re.search('var\\\\s*adbbdddffbad\\\\s*=\\\\s*\"([^\"]+)', html)\n if source:\n headers.update({'Origin': rurl[:-1], 'verifypeer': 'false'})\n url = source.group(1).replace(\n 'MzY3Y2E4NTAzNmQ5NDkzN2FiNTQzZTBiNmI4YTIwYzg', '')\n url = url.replace('NjYxOWU2OTNmZWQ0M2I3ZTFhM2U4NTc4Y2NhZmY3NmM=',\n '')\n url = base64.b64decode(url).decode('utf-8')\n return url + helpers.append_headers(headers)\n raise ResolverError('File Not Found')\n\n def get_url(self, host, media_id):\n return self._default_get_url(host, media_id, template=\n 'https://{host}/e/{media_id}')\n",
"step-4": "<mask token>\nimport re\nimport base64\nfrom resolveurl import common\nfrom resolveurl.plugins.lib import helpers, jsunhunt\nfrom resolveurl.resolver import ResolveUrl, ResolverError\n\n\nclass TubeloadResolver(ResolveUrl):\n name = 'tubeload'\n domains = ['tubeload.co']\n pattern = '(?://|\\\\.)(tubeload\\\\.co)/(?:embed|e|f)/([0-9a-zA-Z]+)'\n\n def get_media_url(self, host, media_id):\n web_url = self.get_url(host, media_id)\n rurl = 'https://{}/'.format(host)\n headers = {'Referer': rurl, 'User-Agent': common.FF_USER_AGENT}\n html = self.net.http_GET(web_url, headers=headers).content\n if 'NOT FOUND' in html or 'Sorry' in html:\n raise ResolverError('File Removed')\n if jsunhunt.detect(html):\n html = re.findall('<head>(.*?)</head>', html, re.S)[0]\n html = jsunhunt.unhunt(html)\n source = re.search('var\\\\s*adbbdddffbad\\\\s*=\\\\s*\"([^\"]+)', html)\n if source:\n headers.update({'Origin': rurl[:-1], 'verifypeer': 'false'})\n url = source.group(1).replace(\n 'MzY3Y2E4NTAzNmQ5NDkzN2FiNTQzZTBiNmI4YTIwYzg', '')\n url = url.replace('NjYxOWU2OTNmZWQ0M2I3ZTFhM2U4NTc4Y2NhZmY3NmM=',\n '')\n url = base64.b64decode(url).decode('utf-8')\n return url + helpers.append_headers(headers)\n raise ResolverError('File Not Found')\n\n def get_url(self, host, media_id):\n return self._default_get_url(host, media_id, template=\n 'https://{host}/e/{media_id}')\n",
"step-5": "\"\"\"\n Plugin for ResolveUrl\n Copyright (C) 2022 shellc0de\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\nimport re\nimport base64\nfrom resolveurl import common\nfrom resolveurl.plugins.lib import helpers, jsunhunt\nfrom resolveurl.resolver import ResolveUrl, ResolverError\n\n\nclass TubeloadResolver(ResolveUrl):\n name = 'tubeload'\n domains = ['tubeload.co']\n pattern = r'(?://|\\.)(tubeload\\.co)/(?:embed|e|f)/([0-9a-zA-Z]+)'\n\n def get_media_url(self, host, media_id):\n web_url = self.get_url(host, media_id)\n rurl = 'https://{}/'.format(host)\n headers = {\n 'Referer': rurl,\n 'User-Agent': common.FF_USER_AGENT\n }\n html = self.net.http_GET(web_url, headers=headers).content\n if 'NOT FOUND' in html or 'Sorry' in html:\n raise ResolverError('File Removed')\n\n if jsunhunt.detect(html):\n html = re.findall('<head>(.*?)</head>', html, re.S)[0]\n html = jsunhunt.unhunt(html)\n\n source = re.search(r'var\\s*adbbdddffbad\\s*=\\s*\"([^\"]+)', html)\n if source:\n headers.update({'Origin': rurl[:-1], 'verifypeer': 'false'})\n url = source.group(1).replace('MzY3Y2E4NTAzNmQ5NDkzN2FiNTQzZTBiNmI4YTIwYzg', '')\n url = url.replace('NjYxOWU2OTNmZWQ0M2I3ZTFhM2U4NTc4Y2NhZmY3NmM=', '')\n url = base64.b64decode(url).decode('utf-8')\n return url + helpers.append_headers(headers)\n\n raise ResolverError('File Not Found')\n\n def get_url(self, host, media_id):\n return self._default_get_url(host, media_id, template='https://{host}/e/{media_id}')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import torch.nn as nn
def my_loss():
return nn.CrossEntropyLoss()
|
normal
|
{
"blob_id": "418f2e1cbe4fb3ef369e981e72bf40eeddfd052e",
"index": 2408,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef my_loss():\n return nn.CrossEntropyLoss()\n",
"step-3": "import torch.nn as nn\n\n\ndef my_loss():\n return nn.CrossEntropyLoss()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Trap(GameObject):
<|reserved_special_token_0|>
def __init__(self, gamedir, filename=None):
self.attacks = list()
self.x = 0
self.y = 0
self.radius = 0
self.is_first_round = True
GameObject.__init__(self, gamedir, filename)
<|reserved_special_token_0|>
def trigger_trap(self, victim):
attac = random.choice(self.attacks)
attack = attac[0]
damage = attac[1]
victim.health = mb_subs.subtract_to_floor(victim.health, damage)
if damage >= 0:
commentary = '(OH NO!) %s' % (attack % victim.name)
else:
commentary = '(WOW!) %s' % (attack % victim.name)
return commentary, damage
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Trap(GameObject):
<|reserved_special_token_0|>
def __init__(self, gamedir, filename=None):
self.attacks = list()
self.x = 0
self.y = 0
self.radius = 0
self.is_first_round = True
GameObject.__init__(self, gamedir, filename)
def read_in_config(self, filename):
parser = GameObject.read_in_config(self, filename)
if parser.has_section('attacks'):
self.attacks = mb_subs.actions(parser.items('attacks'))
del parser
def trigger_trap(self, victim):
attac = random.choice(self.attacks)
attack = attac[0]
damage = attac[1]
victim.health = mb_subs.subtract_to_floor(victim.health, damage)
if damage >= 0:
commentary = '(OH NO!) %s' % (attack % victim.name)
else:
commentary = '(WOW!) %s' % (attack % victim.name)
return commentary, damage
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Trap(GameObject):
"""
This class is used to create traps (or blessing objects) that exist
in the arena on their own but that are not subject to attack.
    The only real attributes traps have are the different types of attacks that
they can carry out on combatants in the arena.
"""
def __init__(self, gamedir, filename=None):
self.attacks = list()
self.x = 0
self.y = 0
self.radius = 0
self.is_first_round = True
GameObject.__init__(self, gamedir, filename)
def read_in_config(self, filename):
parser = GameObject.read_in_config(self, filename)
if parser.has_section('attacks'):
self.attacks = mb_subs.actions(parser.items('attacks'))
del parser
def trigger_trap(self, victim):
attac = random.choice(self.attacks)
attack = attac[0]
damage = attac[1]
victim.health = mb_subs.subtract_to_floor(victim.health, damage)
if damage >= 0:
commentary = '(OH NO!) %s' % (attack % victim.name)
else:
commentary = '(WOW!) %s' % (attack % victim.name)
return commentary, damage
<|reserved_special_token_1|>
import random
import mb_io
import mb_subs
from mb_go import GameObject
class Trap(GameObject):
"""
This class is used to create traps (or blessing objects) that exist
in the arena on their own but that are not subject to attack.
    The only real attributes traps have are the different types of attacks that
they can carry out on combatants in the arena.
"""
def __init__(self, gamedir, filename=None):
self.attacks = list()
self.x = 0
self.y = 0
self.radius = 0
self.is_first_round = True
GameObject.__init__(self, gamedir, filename)
def read_in_config(self, filename):
parser = GameObject.read_in_config(self, filename)
if parser.has_section('attacks'):
self.attacks = mb_subs.actions(parser.items('attacks'))
del parser
def trigger_trap(self, victim):
attac = random.choice(self.attacks)
attack = attac[0]
damage = attac[1]
victim.health = mb_subs.subtract_to_floor(victim.health, damage)
if damage >= 0:
commentary = '(OH NO!) %s' % (attack % victim.name)
else:
commentary = '(WOW!) %s' % (attack % victim.name)
return commentary, damage
<|reserved_special_token_1|>
# -------------------------------------------------------------------------
# File: mb_trap.py
# Created: Tue Feb 7 20:51:32 2006
# -------------------------------------------------------------------------
import random
import mb_io
import mb_subs
from mb_go import GameObject
class Trap(GameObject):
"""
This class is used to create traps (or blessing objects) that exist
in the arena on their own but that are not subject to attack.
    The only real attributes traps have are the different types of attacks that
they can carry out on combatants in the arena.
"""
def __init__(self, gamedir, filename = None):
self.attacks = list()
self.x = 0
self.y = 0
self.radius = 0
self.is_first_round = True
GameObject.__init__(self, gamedir, filename)
def read_in_config(self, filename):
parser = GameObject.read_in_config(self, filename)
if parser.has_section('attacks'):
self.attacks = mb_subs.actions(parser.items('attacks'))
del parser
def trigger_trap(self, victim):
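        # Each entry in self.attacks is a (message template, damage) pair read from the
        # 'attacks' config section; a negative damage value acts as a blessing, hence the
        # different commentary prefix below.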
attac = random.choice(self.attacks)
attack = attac[0]
damage = attac[1]
victim.health = mb_subs.subtract_to_floor(victim.health, damage)
if damage >= 0:
commentary = '(OH NO!) %s' % (attack % victim.name)
else:
commentary = '(WOW!) %s' % (attack % victim.name)
return commentary, damage
|
flexible
|
{
"blob_id": "f2a94f6bfe86af439a8248b40732340c45d89b93",
"index": 9925,
"step-1": "<mask token>\n\n\nclass Trap(GameObject):\n <mask token>\n\n def __init__(self, gamedir, filename=None):\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n <mask token>\n\n def trigger_trap(self, victim):\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-2": "<mask token>\n\n\nclass Trap(GameObject):\n <mask token>\n\n def __init__(self, gamedir, filename=None):\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n\n def read_in_config(self, filename):\n parser = GameObject.read_in_config(self, filename)\n if parser.has_section('attacks'):\n self.attacks = mb_subs.actions(parser.items('attacks'))\n del parser\n\n def trigger_trap(self, victim):\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-3": "<mask token>\n\n\nclass Trap(GameObject):\n \"\"\"\n This class is used to create traps (or blessing objects) that exist\n in the arena on their own but that are not subject to attack.\n The only real attributes traps have is different types of attacks that\n they can carry out on combatants in the arena.\n\n \"\"\"\n\n def __init__(self, gamedir, filename=None):\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n\n def read_in_config(self, filename):\n parser = GameObject.read_in_config(self, filename)\n if parser.has_section('attacks'):\n self.attacks = mb_subs.actions(parser.items('attacks'))\n del parser\n\n def trigger_trap(self, victim):\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-4": "import random\nimport mb_io\nimport mb_subs\nfrom mb_go import GameObject\n\n\nclass Trap(GameObject):\n \"\"\"\n This class is used to create traps (or blessing objects) that exist\n in the arena on their own but that are not subject to attack.\n The only real attributes traps have is different types of attacks that\n they can carry out on combatants in the arena.\n\n \"\"\"\n\n def __init__(self, gamedir, filename=None):\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n\n def read_in_config(self, filename):\n parser = GameObject.read_in_config(self, filename)\n if parser.has_section('attacks'):\n self.attacks = mb_subs.actions(parser.items('attacks'))\n del parser\n\n def trigger_trap(self, victim):\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-5": "# -------------------------------------------------------------------------\n# File: mb_trap.py\n# Created: Tue Feb 7 20:51:32 2006\n# -------------------------------------------------------------------------\n\nimport random\n\nimport mb_io\nimport mb_subs\nfrom mb_go import GameObject\n\nclass Trap(GameObject):\n \"\"\"\n This class is used to create traps (or blessing objects) that exist\n in the arena on their own but that are not subject to attack.\n The only real attributes traps have is different types of attacks that\n they can carry out on combatants in the arena.\n\n \"\"\"\n def __init__(self, gamedir, filename = None):\n\n self.attacks = list()\n self.x = 0\n self.y = 0\n self.radius = 0\n self.is_first_round = True\n GameObject.__init__(self, gamedir, filename)\n\n def read_in_config(self, filename):\n parser = GameObject.read_in_config(self, filename)\n if parser.has_section('attacks'):\n self.attacks = mb_subs.actions(parser.items('attacks'))\n del parser\n\n def trigger_trap(self, victim):\n\n attac = random.choice(self.attacks)\n attack = attac[0]\n damage = attac[1]\n victim.health = mb_subs.subtract_to_floor(victim.health, damage)\n\n if damage >= 0:\n commentary = '(OH NO!) %s' % (attack % victim.name)\n else:\n commentary = '(WOW!) %s' % (attack % victim.name)\n return commentary, damage\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
browser.get('https://www.google.com')
time.sleep(3)
browser.maximize_window()
<|reserved_special_token_0|>
print(title)
assert 'Google' == title
browser.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
capabilities = {'browserName': 'firefox', 'browserVersion': '92.0',
'selenoid:options': {'enableVNC': True, 'enableVideo': True}}
browser = webdriver.Remote(command_executor='http://localhost:4444/wd/hub',
desired_capabilities=capabilities)
browser.get('https://www.google.com')
time.sleep(3)
browser.maximize_window()
title = browser.title
print(title)
assert 'Google' == title
browser.close()
<|reserved_special_token_1|>
import time
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager
import os
from selenium import webdriver
capabilities = {'browserName': 'firefox', 'browserVersion': '92.0',
'selenoid:options': {'enableVNC': True, 'enableVideo': True}}
browser = webdriver.Remote(command_executor='http://localhost:4444/wd/hub',
desired_capabilities=capabilities)
browser.get('https://www.google.com')
time.sleep(3)
browser.maximize_window()
title = browser.title
print(title)
assert 'Google' == title
browser.close()
<|reserved_special_token_1|>
import time
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager
import os
# caps = {'browserName': os.getenv('BROWSER', 'firefox')}
# browser = webdriver.Remote(
# command_executor='http://localhost:4444/wd/hub',
# desired_capabilities=caps
# )
from selenium import webdriver
capabilities = {
"browserName": "firefox",
"browserVersion": "92.0",
"selenoid:options": {
"enableVNC": True,
"enableVideo": True
}
}
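# Note: "selenoid:options" is a Selenoid-specific capability block; enableVNC and enableVideo
# ask the hub (assumed here to be Selenoid listening on localhost:4444) to expose a live VNC
# session and to record a video of the run.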
browser = webdriver.Remote(
command_executor="http://localhost:4444/wd/hub",
desired_capabilities=capabilities)
browser.get("https://www.google.com")
time.sleep(3)
browser.maximize_window()
title = browser.title
print(title)
assert "Google" == title
browser.close()
#browser.quit()
|
flexible
|
{
"blob_id": "d84641ce2854d4af26cd46abbe9557d6006cfc2e",
"index": 681,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nbrowser.get('https://www.google.com')\ntime.sleep(3)\nbrowser.maximize_window()\n<mask token>\nprint(title)\nassert 'Google' == title\nbrowser.close()\n",
"step-3": "<mask token>\ncapabilities = {'browserName': 'firefox', 'browserVersion': '92.0',\n 'selenoid:options': {'enableVNC': True, 'enableVideo': True}}\nbrowser = webdriver.Remote(command_executor='http://localhost:4444/wd/hub',\n desired_capabilities=capabilities)\nbrowser.get('https://www.google.com')\ntime.sleep(3)\nbrowser.maximize_window()\ntitle = browser.title\nprint(title)\nassert 'Google' == title\nbrowser.close()\n",
"step-4": "import time\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\nimport os\nfrom selenium import webdriver\ncapabilities = {'browserName': 'firefox', 'browserVersion': '92.0',\n 'selenoid:options': {'enableVNC': True, 'enableVideo': True}}\nbrowser = webdriver.Remote(command_executor='http://localhost:4444/wd/hub',\n desired_capabilities=capabilities)\nbrowser.get('https://www.google.com')\ntime.sleep(3)\nbrowser.maximize_window()\ntitle = browser.title\nprint(title)\nassert 'Google' == title\nbrowser.close()\n",
"step-5": "import time\n\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager\nimport os\n\n\n# caps = {'browserName': os.getenv('BROWSER', 'firefox')}\n# browser = webdriver.Remote(\n# command_executor='http://localhost:4444/wd/hub',\n# desired_capabilities=caps\n# )\n\nfrom selenium import webdriver\n\ncapabilities = {\n \"browserName\": \"firefox\",\n \"browserVersion\": \"92.0\",\n \"selenoid:options\": {\n \"enableVNC\": True,\n \"enableVideo\": True\n }\n}\n\nbrowser = webdriver.Remote(\n command_executor=\"http://localhost:4444/wd/hub\",\n desired_capabilities=capabilities)\nbrowser.get(\"https://www.google.com\")\ntime.sleep(3)\nbrowser.maximize_window()\n\ntitle = browser.title\n\nprint(title)\n\nassert \"Google\" == title\n\nbrowser.close()\n\n#browser.quit()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@exception_handler
def get_doi_not_in_index(index, dois):
es = get_client()
results = es.search(index=index, body={'query': {'bool': {'filter': [{
'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(
dois), '_source': False}, request_timeout=60 * 5)
existing_dois = set([e['fields']['doi'][0] for e in results['hits'][
'hits']])
not_indexed_dois = set(dois) - existing_dois
res = []
for doi in list(not_indexed_dois):
res += get_doi_not_in_index_one(index, doi)
logger.debug(f'{len(res)} dois not in index detected')
return res
@exception_handler
def get_doi_not_in_index_one(index, doi):
es = get_client()
results = es.search(index=index, request_cache=False, body={'query': {
'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [
'doi'], '_source': True}, request_timeout=60 * 5)
existing_dois = set([e['fields']['doi'][0] for e in results['hits'][
'hits']])
not_indexed_dois = set([doi]) - existing_dois
return list(not_indexed_dois)
@exception_handler
def update_local_affiliations(index, current_dois, local_affiliations):
es = get_client()
logger.debug(
f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'
)
body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':
'proceed', 'inline':
'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'
, 'params': {'local_affiliations': local_affiliations}}, 'query': {
'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}
es.update_by_query(index=index, body=body, request_timeout=60 * 5)
@exception_handler
def delete_index(index: str) ->None:
logger.debug(f'Deleting {index}')
es = get_client()
response = es.indices.delete(index=index, ignore=[400, 404])
logger.debug(response)
<|reserved_special_token_0|>
def get_analyzers() ->dict:
return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',
'french_elision', 'icu_folding']}}
<|reserved_special_token_0|>
@exception_handler
def reset_index(index: str) ->None:
es = get_client()
delete_index(index)
settings = {'analysis': {'filter': get_filters(), 'analyzer':
get_analyzers()}}
dynamic_match = None
if 'bso-publications' in index:
dynamic_match = None
elif 'publications-' in index:
dynamic_match = '*authors'
mappings = {'properties': {}}
for f in ['title', 'affiliations.name', 'authors.first_name',
'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:
mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}
if dynamic_match:
mappings['dynamic_templates'] = [{'objects': {'match':
dynamic_match, 'match_mapping_type': 'object', 'mapping': {
'type': 'nested'}}}]
response = es.indices.create(index=index, body={'settings': settings,
'mappings': mappings}, ignore=400)
if 'acknowledged' in response and response['acknowledged']:
response = str(response['index'])
logger.debug(f'Index mapping success for index: {response}')
@exception_handler
def load_in_es(data: list, index: str) ->list:
es = get_client()
actions = [{'_index': index, '_source': datum} for datum in data]
ix = 0
indexed = []
for success, info in helpers.parallel_bulk(client=es, actions=actions,
chunk_size=500, request_timeout=60, raise_on_error=False):
if not success:
logger.debug(f'A document failed: {info}')
else:
indexed.append(data[ix])
ix += 1
logger.debug(f'{len(data)} elements imported into {index}')
return indexed
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@exception_handler
def get_doi_not_in_index(index, dois):
es = get_client()
results = es.search(index=index, body={'query': {'bool': {'filter': [{
'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(
dois), '_source': False}, request_timeout=60 * 5)
existing_dois = set([e['fields']['doi'][0] for e in results['hits'][
'hits']])
not_indexed_dois = set(dois) - existing_dois
res = []
for doi in list(not_indexed_dois):
res += get_doi_not_in_index_one(index, doi)
logger.debug(f'{len(res)} dois not in index detected')
return res
@exception_handler
def get_doi_not_in_index_one(index, doi):
es = get_client()
results = es.search(index=index, request_cache=False, body={'query': {
'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [
'doi'], '_source': True}, request_timeout=60 * 5)
existing_dois = set([e['fields']['doi'][0] for e in results['hits'][
'hits']])
not_indexed_dois = set([doi]) - existing_dois
return list(not_indexed_dois)
@exception_handler
def update_local_affiliations(index, current_dois, local_affiliations):
es = get_client()
logger.debug(
f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'
)
body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':
'proceed', 'inline':
'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'
, 'params': {'local_affiliations': local_affiliations}}, 'query': {
'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}
es.update_by_query(index=index, body=body, request_timeout=60 * 5)
@exception_handler
def delete_index(index: str) ->None:
logger.debug(f'Deleting {index}')
es = get_client()
response = es.indices.delete(index=index, ignore=[400, 404])
logger.debug(response)
<|reserved_special_token_0|>
def get_analyzers() ->dict:
return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',
'french_elision', 'icu_folding']}}
def get_filters() ->dict:
return {'french_elision': {'type': 'elision', 'articles_case': True,
'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu',
'quoiqu', 'lorsqu', 'puisqu']}}
@exception_handler
def reset_index(index: str) ->None:
es = get_client()
delete_index(index)
settings = {'analysis': {'filter': get_filters(), 'analyzer':
get_analyzers()}}
dynamic_match = None
if 'bso-publications' in index:
dynamic_match = None
elif 'publications-' in index:
dynamic_match = '*authors'
mappings = {'properties': {}}
for f in ['title', 'affiliations.name', 'authors.first_name',
'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:
mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}
if dynamic_match:
mappings['dynamic_templates'] = [{'objects': {'match':
dynamic_match, 'match_mapping_type': 'object', 'mapping': {
'type': 'nested'}}}]
response = es.indices.create(index=index, body={'settings': settings,
'mappings': mappings}, ignore=400)
if 'acknowledged' in response and response['acknowledged']:
response = str(response['index'])
logger.debug(f'Index mapping success for index: {response}')
@exception_handler
def load_in_es(data: list, index: str) ->list:
es = get_client()
actions = [{'_index': index, '_source': datum} for datum in data]
ix = 0
indexed = []
for success, info in helpers.parallel_bulk(client=es, actions=actions,
chunk_size=500, request_timeout=60, raise_on_error=False):
if not success:
logger.debug(f'A document failed: {info}')
else:
indexed.append(data[ix])
ix += 1
logger.debug(f'{len(data)} elements imported into {index}')
return indexed
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@exception_handler
def get_client():
global client
if client is None:
client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK,
ES_PASSWORD_BSO_BACK))
return client
@exception_handler
def get_doi_not_in_index(index, dois):
es = get_client()
results = es.search(index=index, body={'query': {'bool': {'filter': [{
'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(
dois), '_source': False}, request_timeout=60 * 5)
existing_dois = set([e['fields']['doi'][0] for e in results['hits'][
'hits']])
not_indexed_dois = set(dois) - existing_dois
res = []
for doi in list(not_indexed_dois):
res += get_doi_not_in_index_one(index, doi)
logger.debug(f'{len(res)} dois not in index detected')
return res
@exception_handler
def get_doi_not_in_index_one(index, doi):
es = get_client()
results = es.search(index=index, request_cache=False, body={'query': {
'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [
'doi'], '_source': True}, request_timeout=60 * 5)
existing_dois = set([e['fields']['doi'][0] for e in results['hits'][
'hits']])
not_indexed_dois = set([doi]) - existing_dois
return list(not_indexed_dois)
@exception_handler
def update_local_affiliations(index, current_dois, local_affiliations):
es = get_client()
logger.debug(
f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'
)
body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':
'proceed', 'inline':
'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'
, 'params': {'local_affiliations': local_affiliations}}, 'query': {
'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}
es.update_by_query(index=index, body=body, request_timeout=60 * 5)
@exception_handler
def delete_index(index: str) ->None:
logger.debug(f'Deleting {index}')
es = get_client()
response = es.indices.delete(index=index, ignore=[400, 404])
logger.debug(response)
<|reserved_special_token_0|>
def get_analyzers() ->dict:
return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',
'french_elision', 'icu_folding']}}
def get_filters() ->dict:
return {'french_elision': {'type': 'elision', 'articles_case': True,
'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu',
'quoiqu', 'lorsqu', 'puisqu']}}
@exception_handler
def reset_index(index: str) ->None:
es = get_client()
delete_index(index)
settings = {'analysis': {'filter': get_filters(), 'analyzer':
get_analyzers()}}
dynamic_match = None
if 'bso-publications' in index:
dynamic_match = None
elif 'publications-' in index:
dynamic_match = '*authors'
mappings = {'properties': {}}
for f in ['title', 'affiliations.name', 'authors.first_name',
'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:
mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}
if dynamic_match:
mappings['dynamic_templates'] = [{'objects': {'match':
dynamic_match, 'match_mapping_type': 'object', 'mapping': {
'type': 'nested'}}}]
response = es.indices.create(index=index, body={'settings': settings,
'mappings': mappings}, ignore=400)
if 'acknowledged' in response and response['acknowledged']:
response = str(response['index'])
logger.debug(f'Index mapping success for index: {response}')
@exception_handler
def load_in_es(data: list, index: str) ->list:
es = get_client()
actions = [{'_index': index, '_source': datum} for datum in data]
ix = 0
indexed = []
for success, info in helpers.parallel_bulk(client=es, actions=actions,
chunk_size=500, request_timeout=60, raise_on_error=False):
if not success:
logger.debug(f'A document failed: {info}')
else:
indexed.append(data[ix])
ix += 1
logger.debug(f'{len(data)} elements imported into {index}')
return indexed
<|reserved_special_token_1|>
<|reserved_special_token_0|>
client = None
logger = get_logger(__name__)
@exception_handler
def get_client():
global client
if client is None:
client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK,
ES_PASSWORD_BSO_BACK))
return client
@exception_handler
def get_doi_not_in_index(index, dois):
es = get_client()
results = es.search(index=index, body={'query': {'bool': {'filter': [{
'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(
dois), '_source': False}, request_timeout=60 * 5)
existing_dois = set([e['fields']['doi'][0] for e in results['hits'][
'hits']])
not_indexed_dois = set(dois) - existing_dois
res = []
for doi in list(not_indexed_dois):
res += get_doi_not_in_index_one(index, doi)
logger.debug(f'{len(res)} dois not in index detected')
return res
@exception_handler
def get_doi_not_in_index_one(index, doi):
es = get_client()
results = es.search(index=index, request_cache=False, body={'query': {
'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [
'doi'], '_source': True}, request_timeout=60 * 5)
existing_dois = set([e['fields']['doi'][0] for e in results['hits'][
'hits']])
not_indexed_dois = set([doi]) - existing_dois
return list(not_indexed_dois)
@exception_handler
def update_local_affiliations(index, current_dois, local_affiliations):
es = get_client()
logger.debug(
f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'
)
body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':
'proceed', 'inline':
'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'
, 'params': {'local_affiliations': local_affiliations}}, 'query': {
'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}
es.update_by_query(index=index, body=body, request_timeout=60 * 5)
@exception_handler
def delete_index(index: str) ->None:
logger.debug(f'Deleting {index}')
es = get_client()
response = es.indices.delete(index=index, ignore=[400, 404])
logger.debug(response)
@exception_handler
def update_alias(alias: str, old_index: str, new_index: str) ->None:
es = get_client()
logger.debug(f'updating alias {alias} from {old_index} to {new_index}')
response = es.indices.update_aliases({'actions': [{'remove': {'index':
old_index, 'alias': alias}}, {'add': {'index': new_index, 'alias':
alias}}]})
logger.debug(response)
def get_analyzers() ->dict:
return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',
'french_elision', 'icu_folding']}}
def get_filters() ->dict:
return {'french_elision': {'type': 'elision', 'articles_case': True,
'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu',
'quoiqu', 'lorsqu', 'puisqu']}}
@exception_handler
def reset_index(index: str) ->None:
es = get_client()
delete_index(index)
settings = {'analysis': {'filter': get_filters(), 'analyzer':
get_analyzers()}}
dynamic_match = None
if 'bso-publications' in index:
dynamic_match = None
elif 'publications-' in index:
dynamic_match = '*authors'
mappings = {'properties': {}}
for f in ['title', 'affiliations.name', 'authors.first_name',
'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:
mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}
if dynamic_match:
mappings['dynamic_templates'] = [{'objects': {'match':
dynamic_match, 'match_mapping_type': 'object', 'mapping': {
'type': 'nested'}}}]
response = es.indices.create(index=index, body={'settings': settings,
'mappings': mappings}, ignore=400)
if 'acknowledged' in response and response['acknowledged']:
response = str(response['index'])
logger.debug(f'Index mapping success for index: {response}')
@exception_handler
def load_in_es(data: list, index: str) ->list:
es = get_client()
actions = [{'_index': index, '_source': datum} for datum in data]
ix = 0
indexed = []
for success, info in helpers.parallel_bulk(client=es, actions=actions,
chunk_size=500, request_timeout=60, raise_on_error=False):
if not success:
logger.debug(f'A document failed: {info}')
else:
indexed.append(data[ix])
ix += 1
logger.debug(f'{len(data)} elements imported into {index}')
return indexed
<|reserved_special_token_1|>
from elasticsearch import Elasticsearch, helpers
from bso.server.main.config import ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK, ES_URL
from bso.server.main.decorator import exception_handler
from bso.server.main.logger import get_logger
client = None
logger = get_logger(__name__)
@exception_handler
def get_client():
global client
if client is None:
client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK))
return client
@exception_handler
def get_doi_not_in_index(index, dois):
es = get_client()
results = es.search(
index=index,
body={"query": {"bool": {"filter": [{'terms': {'doi.keyword': dois}}]}}, "fields": ['doi'], "size": len(dois),
"_source": False},
request_timeout=60*5
)
existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])
not_indexed_dois = set(dois) - existing_dois
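    # Re-check every apparently-missing DOI one by one below; the per-DOI search disables the
    # request cache, presumably to rule out stale cached hits.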
res = []
for doi in list(not_indexed_dois):
res += get_doi_not_in_index_one(index, doi)
logger.debug(f'{len(res)} dois not in index detected')
return res
@exception_handler
def get_doi_not_in_index_one(index, doi):
es = get_client()
results = es.search(
index=index,
request_cache=False,
body={"query": {"bool": {"filter": [{'term': {'doi.keyword': doi}}]}}, "fields": ['doi'], "_source": True},
request_timeout=60*5
)
existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])
not_indexed_dois = set([doi]) - existing_dois
return list(not_indexed_dois)
@exception_handler
def update_local_affiliations(index, current_dois, local_affiliations):
es = get_client()
logger.debug(f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois')
body = {
"script": {
"lang": "painless",
"refresh": True,
"conflicts": "proceed",
"inline": "if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations ="
" new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);"
"ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct()"
".sorted().collect(Collectors.toList())",
"params": {"local_affiliations": local_affiliations}
},
"query": {
"bool": {
"filter": [{
"terms": {
"doi.keyword": current_dois
}
}]
}
}
}
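    # The painless script above appends the given affiliations to every matching document, then
    # de-duplicates and sorts bso_local_affiliations in place; the terms filter restricts the
    # update to the supplied DOIs.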
es.update_by_query(index=index, body=body, request_timeout=60*5)
@exception_handler
def delete_index(index: str) -> None:
logger.debug(f'Deleting {index}')
es = get_client()
response = es.indices.delete(index=index, ignore=[400, 404])
logger.debug(response)
@exception_handler
def update_alias(alias: str, old_index: str, new_index: str) -> None:
es = get_client()
logger.debug(f'updating alias {alias} from {old_index} to {new_index}')
response = es.indices.update_aliases({
'actions': [
{'remove': {'index': old_index, 'alias': alias}},
{'add': {'index': new_index, 'alias': alias}}
]
})
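    # Both the remove and the add action go out in a single update_aliases call, so the alias
    # switch from old_index to new_index is applied atomically by Elasticsearch.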
logger.debug(response)
def get_analyzers() -> dict:
return {
'light': {
'tokenizer': 'icu_tokenizer',
'filter': [
'lowercase',
'french_elision',
'icu_folding'
]
}
}
def get_filters() -> dict:
return {
'french_elision': {
'type': 'elision',
'articles_case': True,
'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu', 'quoiqu', 'lorsqu', 'puisqu']
}
}
@exception_handler
def reset_index(index: str) -> None:
es = get_client()
delete_index(index)
settings = {
'analysis': {
'filter': get_filters(),
'analyzer': get_analyzers()
}
}
dynamic_match = None
if 'bso-publications' in index:
# dynamic_match = "*oa_locations"
dynamic_match = None
elif 'publications-' in index:
dynamic_match = "*authors"
mappings = { 'properties': {} }
    # caution: the .keyword analyzer will not be present for these fields!
for f in ['title', 'affiliations.name', 'authors.first_name', 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:
mappings['properties'][f] = {
'type': 'text',
'analyzer': 'light'
}
if dynamic_match:
mappings["dynamic_templates"] = [
{
"objects": {
"match": dynamic_match,
"match_mapping_type": "object",
"mapping": {
"type": "nested"
}
}
}
]
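        # Mapping fields that match dynamic_match (e.g. "*authors") as "nested" keeps each
        # author object queryable as a whole instead of having its fields flattened.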
response = es.indices.create(
index=index,
body={'settings': settings, 'mappings': mappings},
ignore=400 # ignore 400 already exists code
)
if 'acknowledged' in response and response['acknowledged']:
response = str(response['index'])
logger.debug(f'Index mapping success for index: {response}')
@exception_handler
def load_in_es(data: list, index: str) -> list:
es = get_client()
actions = [{'_index': index, '_source': datum} for datum in data]
ix = 0
indexed = []
for success, info in helpers.parallel_bulk(client=es, actions=actions, chunk_size=500, request_timeout=60,
raise_on_error=False):
if not success:
logger.debug(f'A document failed: {info}')
else:
indexed.append(data[ix])
ix += 1
logger.debug(f'{len(data)} elements imported into {index}')
return indexed
|
flexible
|
{
"blob_id": "9f760c0cf2afc746a1fc19ac68d1b2f406c7efe1",
"index": 5767,
"step-1": "<mask token>\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(index=index, body={'query': {'bool': {'filter': [{\n 'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(\n dois), '_source': False}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(index=index, request_cache=False, body={'query': {\n 'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [\n 'doi'], '_source': True}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(\n f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'\n )\n body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':\n 'proceed', 'inline':\n 'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'\n , 'params': {'local_affiliations': local_affiliations}}, 'query': {\n 'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}\n es.update_by_query(index=index, body=body, request_timeout=60 * 5)\n\n\n@exception_handler\ndef delete_index(index: str) ->None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n<mask token>\n\n\ndef get_analyzers() ->dict:\n return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',\n 'french_elision', 'icu_folding']}}\n\n\n<mask token>\n\n\n@exception_handler\ndef reset_index(index: str) ->None:\n es = get_client()\n delete_index(index)\n settings = {'analysis': {'filter': get_filters(), 'analyzer':\n get_analyzers()}}\n dynamic_match = None\n if 'bso-publications' in index:\n dynamic_match = None\n elif 'publications-' in index:\n dynamic_match = '*authors'\n mappings = {'properties': {}}\n for f in ['title', 'affiliations.name', 'authors.first_name',\n 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}\n if dynamic_match:\n mappings['dynamic_templates'] = [{'objects': {'match':\n dynamic_match, 'match_mapping_type': 'object', 'mapping': {\n 'type': 'nested'}}}]\n response = es.indices.create(index=index, body={'settings': settings,\n 'mappings': mappings}, ignore=400)\n if 'acknowledged' in response and response['acknowledged']:\n response = str(response['index'])\n logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) ->list:\n es = get_client()\n actions = [{'_index': index, '_source': datum} for datum in data]\n ix = 0\n indexed = []\n for success, info in helpers.parallel_bulk(client=es, actions=actions,\n chunk_size=500, request_timeout=60, 
raise_on_error=False):\n if not success:\n logger.debug(f'A document failed: {info}')\n else:\n indexed.append(data[ix])\n ix += 1\n logger.debug(f'{len(data)} elements imported into {index}')\n return indexed\n",
"step-2": "<mask token>\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(index=index, body={'query': {'bool': {'filter': [{\n 'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(\n dois), '_source': False}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(index=index, request_cache=False, body={'query': {\n 'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [\n 'doi'], '_source': True}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(\n f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'\n )\n body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':\n 'proceed', 'inline':\n 'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'\n , 'params': {'local_affiliations': local_affiliations}}, 'query': {\n 'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}\n es.update_by_query(index=index, body=body, request_timeout=60 * 5)\n\n\n@exception_handler\ndef delete_index(index: str) ->None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n<mask token>\n\n\ndef get_analyzers() ->dict:\n return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',\n 'french_elision', 'icu_folding']}}\n\n\ndef get_filters() ->dict:\n return {'french_elision': {'type': 'elision', 'articles_case': True,\n 'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu',\n 'quoiqu', 'lorsqu', 'puisqu']}}\n\n\n@exception_handler\ndef reset_index(index: str) ->None:\n es = get_client()\n delete_index(index)\n settings = {'analysis': {'filter': get_filters(), 'analyzer':\n get_analyzers()}}\n dynamic_match = None\n if 'bso-publications' in index:\n dynamic_match = None\n elif 'publications-' in index:\n dynamic_match = '*authors'\n mappings = {'properties': {}}\n for f in ['title', 'affiliations.name', 'authors.first_name',\n 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}\n if dynamic_match:\n mappings['dynamic_templates'] = [{'objects': {'match':\n dynamic_match, 'match_mapping_type': 'object', 'mapping': {\n 'type': 'nested'}}}]\n response = es.indices.create(index=index, body={'settings': settings,\n 'mappings': mappings}, ignore=400)\n if 'acknowledged' in response and response['acknowledged']:\n response = str(response['index'])\n logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) ->list:\n es = get_client()\n actions = 
[{'_index': index, '_source': datum} for datum in data]\n ix = 0\n indexed = []\n for success, info in helpers.parallel_bulk(client=es, actions=actions,\n chunk_size=500, request_timeout=60, raise_on_error=False):\n if not success:\n logger.debug(f'A document failed: {info}')\n else:\n indexed.append(data[ix])\n ix += 1\n logger.debug(f'{len(data)} elements imported into {index}')\n return indexed\n",
"step-3": "<mask token>\n\n\n@exception_handler\ndef get_client():\n global client\n if client is None:\n client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK,\n ES_PASSWORD_BSO_BACK))\n return client\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(index=index, body={'query': {'bool': {'filter': [{\n 'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(\n dois), '_source': False}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(index=index, request_cache=False, body={'query': {\n 'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [\n 'doi'], '_source': True}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(\n f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'\n )\n body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':\n 'proceed', 'inline':\n 'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'\n , 'params': {'local_affiliations': local_affiliations}}, 'query': {\n 'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}\n es.update_by_query(index=index, body=body, request_timeout=60 * 5)\n\n\n@exception_handler\ndef delete_index(index: str) ->None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n<mask token>\n\n\ndef get_analyzers() ->dict:\n return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',\n 'french_elision', 'icu_folding']}}\n\n\ndef get_filters() ->dict:\n return {'french_elision': {'type': 'elision', 'articles_case': True,\n 'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu',\n 'quoiqu', 'lorsqu', 'puisqu']}}\n\n\n@exception_handler\ndef reset_index(index: str) ->None:\n es = get_client()\n delete_index(index)\n settings = {'analysis': {'filter': get_filters(), 'analyzer':\n get_analyzers()}}\n dynamic_match = None\n if 'bso-publications' in index:\n dynamic_match = None\n elif 'publications-' in index:\n dynamic_match = '*authors'\n mappings = {'properties': {}}\n for f in ['title', 'affiliations.name', 'authors.first_name',\n 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n mappings['properties'][f] = {'type': 'text', 'analyzer': 'light'}\n if dynamic_match:\n mappings['dynamic_templates'] = [{'objects': {'match':\n dynamic_match, 'match_mapping_type': 'object', 'mapping': {\n 'type': 'nested'}}}]\n response = es.indices.create(index=index, body={'settings': settings,\n 'mappings': mappings}, ignore=400)\n if 'acknowledged' in response and response['acknowledged']:\n response = 
str(response['index'])\n logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) ->list:\n es = get_client()\n actions = [{'_index': index, '_source': datum} for datum in data]\n ix = 0\n indexed = []\n for success, info in helpers.parallel_bulk(client=es, actions=actions,\n chunk_size=500, request_timeout=60, raise_on_error=False):\n if not success:\n logger.debug(f'A document failed: {info}')\n else:\n indexed.append(data[ix])\n ix += 1\n logger.debug(f'{len(data)} elements imported into {index}')\n return indexed\n",
"step-4": "<mask token>\nclient = None\nlogger = get_logger(__name__)\n\n\n@exception_handler\ndef get_client():\n global client\n if client is None:\n client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK,\n ES_PASSWORD_BSO_BACK))\n return client\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(index=index, body={'query': {'bool': {'filter': [{\n 'terms': {'doi.keyword': dois}}]}}, 'fields': ['doi'], 'size': len(\n dois), '_source': False}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(index=index, request_cache=False, body={'query': {\n 'bool': {'filter': [{'term': {'doi.keyword': doi}}]}}, 'fields': [\n 'doi'], '_source': True}, request_timeout=60 * 5)\n existing_dois = set([e['fields']['doi'][0] for e in results['hits'][\n 'hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(\n f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois'\n )\n body = {'script': {'lang': 'painless', 'refresh': True, 'conflicts':\n 'proceed', 'inline':\n 'if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations = new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct().sorted().collect(Collectors.toList())'\n , 'params': {'local_affiliations': local_affiliations}}, 'query': {\n 'bool': {'filter': [{'terms': {'doi.keyword': current_dois}}]}}}\n es.update_by_query(index=index, body=body, request_timeout=60 * 5)\n\n\n@exception_handler\ndef delete_index(index: str) ->None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n@exception_handler\ndef update_alias(alias: str, old_index: str, new_index: str) ->None:\n es = get_client()\n logger.debug(f'updating alias {alias} from {old_index} to {new_index}')\n response = es.indices.update_aliases({'actions': [{'remove': {'index':\n old_index, 'alias': alias}}, {'add': {'index': new_index, 'alias':\n alias}}]})\n logger.debug(response)\n\n\ndef get_analyzers() ->dict:\n return {'light': {'tokenizer': 'icu_tokenizer', 'filter': ['lowercase',\n 'french_elision', 'icu_folding']}}\n\n\ndef get_filters() ->dict:\n return {'french_elision': {'type': 'elision', 'articles_case': True,\n 'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu',\n 'quoiqu', 'lorsqu', 'puisqu']}}\n\n\n@exception_handler\ndef reset_index(index: str) ->None:\n es = get_client()\n delete_index(index)\n settings = {'analysis': {'filter': get_filters(), 'analyzer':\n get_analyzers()}}\n dynamic_match = None\n if 'bso-publications' in index:\n dynamic_match = None\n elif 'publications-' in index:\n dynamic_match = '*authors'\n mappings = {'properties': {}}\n for f in ['title', 'affiliations.name', 'authors.first_name',\n 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n mappings['properties'][f] 
= {'type': 'text', 'analyzer': 'light'}\n if dynamic_match:\n mappings['dynamic_templates'] = [{'objects': {'match':\n dynamic_match, 'match_mapping_type': 'object', 'mapping': {\n 'type': 'nested'}}}]\n response = es.indices.create(index=index, body={'settings': settings,\n 'mappings': mappings}, ignore=400)\n if 'acknowledged' in response and response['acknowledged']:\n response = str(response['index'])\n logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) ->list:\n es = get_client()\n actions = [{'_index': index, '_source': datum} for datum in data]\n ix = 0\n indexed = []\n for success, info in helpers.parallel_bulk(client=es, actions=actions,\n chunk_size=500, request_timeout=60, raise_on_error=False):\n if not success:\n logger.debug(f'A document failed: {info}')\n else:\n indexed.append(data[ix])\n ix += 1\n logger.debug(f'{len(data)} elements imported into {index}')\n return indexed\n",
"step-5": "from elasticsearch import Elasticsearch, helpers\n\nfrom bso.server.main.config import ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK, ES_URL\nfrom bso.server.main.decorator import exception_handler\nfrom bso.server.main.logger import get_logger\n\nclient = None\nlogger = get_logger(__name__)\n\n\n@exception_handler\ndef get_client():\n global client\n if client is None:\n client = Elasticsearch(ES_URL, http_auth=(ES_LOGIN_BSO_BACK, ES_PASSWORD_BSO_BACK))\n return client\n\n\n@exception_handler\ndef get_doi_not_in_index(index, dois):\n es = get_client()\n results = es.search(\n index=index,\n body={\"query\": {\"bool\": {\"filter\": [{'terms': {'doi.keyword': dois}}]}}, \"fields\": ['doi'], \"size\": len(dois),\n \"_source\": False},\n request_timeout=60*5\n )\n existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])\n not_indexed_dois = set(dois) - existing_dois\n res = []\n for doi in list(not_indexed_dois):\n res += get_doi_not_in_index_one(index, doi)\n logger.debug(f'{len(res)} dois not in index detected')\n return res\n\n\n@exception_handler\ndef get_doi_not_in_index_one(index, doi):\n es = get_client()\n results = es.search(\n index=index,\n request_cache=False,\n body={\"query\": {\"bool\": {\"filter\": [{'term': {'doi.keyword': doi}}]}}, \"fields\": ['doi'], \"_source\": True},\n request_timeout=60*5\n )\n existing_dois = set([e['fields']['doi'][0] for e in results['hits']['hits']])\n not_indexed_dois = set([doi]) - existing_dois\n return list(not_indexed_dois)\n\n\n@exception_handler\ndef update_local_affiliations(index, current_dois, local_affiliations):\n es = get_client()\n logger.debug(f'updating with local affiliations {local_affiliations} for {len(current_dois)} dois')\n body = {\n \"script\": {\n \"lang\": \"painless\",\n \"refresh\": True,\n \"conflicts\": \"proceed\",\n \"inline\": \"if (ctx._source.bso_local_affiliations == null) {ctx._source.bso_local_affiliations =\"\n \" new ArrayList();} ctx._source.bso_local_affiliations.addAll(params.local_affiliations);\"\n \"ctx._source.bso_local_affiliations = ctx._source.bso_local_affiliations.stream().distinct()\"\n \".sorted().collect(Collectors.toList())\",\n \"params\": {\"local_affiliations\": local_affiliations}\n },\n \"query\": {\n \"bool\": {\n \"filter\": [{\n \"terms\": {\n \"doi.keyword\": current_dois\n }\n }]\n }\n }\n }\n es.update_by_query(index=index, body=body, request_timeout=60*5)\n\n\n@exception_handler\ndef delete_index(index: str) -> None:\n logger.debug(f'Deleting {index}')\n es = get_client()\n response = es.indices.delete(index=index, ignore=[400, 404])\n logger.debug(response)\n\n\n@exception_handler\ndef update_alias(alias: str, old_index: str, new_index: str) -> None:\n es = get_client()\n logger.debug(f'updating alias {alias} from {old_index} to {new_index}')\n response = es.indices.update_aliases({\n 'actions': [\n {'remove': {'index': old_index, 'alias': alias}},\n {'add': {'index': new_index, 'alias': alias}}\n ]\n })\n logger.debug(response)\n\ndef get_analyzers() -> dict:\n return {\n 'light': {\n 'tokenizer': 'icu_tokenizer',\n 'filter': [\n 'lowercase',\n 'french_elision',\n 'icu_folding'\n ]\n }\n }\n\ndef get_filters() -> dict:\n return {\n 'french_elision': {\n 'type': 'elision',\n 'articles_case': True,\n 'articles': ['l', 'm', 't', 'qu', 'n', 's', 'j', 'd', 'c', 'jusqu', 'quoiqu', 'lorsqu', 'puisqu']\n }\n }\n\n@exception_handler\ndef reset_index(index: str) -> None:\n es = get_client()\n delete_index(index)\n \n settings = {\n 'analysis': {\n 'filter': 
get_filters(),\n 'analyzer': get_analyzers()\n }\n }\n \n dynamic_match = None\n if 'bso-publications' in index:\n # dynamic_match = \"*oa_locations\"\n dynamic_match = None\n elif 'publications-' in index:\n dynamic_match = \"*authors\"\n\n mappings = { 'properties': {} }\n # attention l'analyzer .keyword ne sera pas présent pour ce champs !\n for f in ['title', 'affiliations.name', 'authors.first_name', 'authors.last_name', 'authors.full_name', 'authors.affiliations.name']:\n mappings['properties'][f] = { \n 'type': 'text',\n 'analyzer': 'light' \n }\n\n if dynamic_match:\n mappings[\"dynamic_templates\"] = [\n {\n \"objects\": {\n \"match\": dynamic_match,\n \"match_mapping_type\": \"object\",\n \"mapping\": {\n \"type\": \"nested\"\n }\n }\n }\n ]\n response = es.indices.create(\n index=index,\n body={'settings': settings, 'mappings': mappings},\n ignore=400 # ignore 400 already exists code\n )\n if 'acknowledged' in response and response['acknowledged']:\n response = str(response['index'])\n logger.debug(f'Index mapping success for index: {response}')\n\n\n@exception_handler\ndef load_in_es(data: list, index: str) -> list:\n es = get_client()\n actions = [{'_index': index, '_source': datum} for datum in data]\n ix = 0\n indexed = []\n for success, info in helpers.parallel_bulk(client=es, actions=actions, chunk_size=500, request_timeout=60,\n raise_on_error=False):\n if not success:\n logger.debug(f'A document failed: {info}')\n else:\n indexed.append(data[ix])\n ix += 1\n logger.debug(f'{len(data)} elements imported into {index}')\n return indexed\n",
"step-ids": [
7,
8,
9,
11,
13
]
}
|
[
7,
8,
9,
11,
13
] |
<|reserved_special_token_0|>
def recieve_data():
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
msg = pickle.loads(data)
time = float(msg[0])
gain = float(msg[1])
yield time, gain
conn.close()
def animate(i):
xs = []
ys = []
for line in recieve_data():
if len(xs) < 50:
x, y = line
xs.append(float(x))
ys.append(float(y))
else:
break
print(xs, ys)
ax1.clear()
ax1.plot(xs, ys)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s.bind((HOST, PORT))
s.listen(5)
<|reserved_special_token_0|>
ax1.set_ylim(-0.1, 1.1)
ax1.set_xlim(0, 2)
def recieve_data():
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
msg = pickle.loads(data)
time = float(msg[0])
gain = float(msg[1])
yield time, gain
conn.close()
def animate(i):
xs = []
ys = []
for line in recieve_data():
if len(xs) < 50:
x, y = line
xs.append(float(x))
ys.append(float(y))
else:
break
print(xs, ys)
ax1.clear()
ax1.plot(xs, ys)
<|reserved_special_token_0|>
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
time_list = []
gain_list = []
HOST = '127.0.0.1'
PORT = 65432
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
conn, addr = s.accept()
fig, ax1 = plt.subplots()
ax1.set_ylim(-0.1, 1.1)
ax1.set_xlim(0, 2)
def recieve_data():
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
msg = pickle.loads(data)
time = float(msg[0])
gain = float(msg[1])
yield time, gain
conn.close()
def animate(i):
xs = []
ys = []
for line in recieve_data():
if len(xs) < 50:
x, y = line
xs.append(float(x))
ys.append(float(y))
else:
break
print(xs, ys)
ax1.clear()
ax1.plot(xs, ys)
ani = animation.FuncAnimation(fig, animate, interval=10)
plt.show()
<|reserved_special_token_1|>
import socket
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FuncAnimation
from matplotlib import style
import pickle
time_list = []
gain_list = []
HOST = '127.0.0.1'
PORT = 65432
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
conn, addr = s.accept()
fig, ax1 = plt.subplots()
ax1.set_ylim(-0.1, 1.1)
ax1.set_xlim(0, 2)
def recieve_data():
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
msg = pickle.loads(data)
time = float(msg[0])
gain = float(msg[1])
yield time, gain
conn.close()
def animate(i):
xs = []
ys = []
for line in recieve_data():
if len(xs) < 50:
x, y = line
xs.append(float(x))
ys.append(float(y))
else:
break
print(xs, ys)
ax1.clear()
ax1.plot(xs, ys)
ani = animation.FuncAnimation(fig, animate, interval=10)
plt.show()
<|reserved_special_token_1|>
#!/usr/bin/env python
import socket
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.animation import FuncAnimation
from matplotlib import style
import pickle
# Create figure for plotting
time_list = []
gain_list = []
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 65432 # Port to listen on (non-privileged ports are > 1023)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
conn, addr = s.accept()
fig, ax1 = plt.subplots()
ax1.set_ylim(-.1, 1.1)
ax1.set_xlim(0, 2)
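# Generator: block on the client socket, echo each message back to the sender,
# then yield the unpickled (time, gain) pair until the peer closes the connection.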
def recieve_data():
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
msg = pickle.loads(data)
time = float(msg[0])
gain = float(msg[1])
yield time , gain
conn.close()
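# FuncAnimation callback: collect up to 50 (time, gain) samples per frame,
# then clear and redraw the axes.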
def animate(i):
xs = []
ys = []
for line in recieve_data():
if len(xs) < 50:
x, y = line
#print(x,y)
xs.append(float(x))
ys.append(float(y))
        else:
            break
print(xs,ys)
ax1.clear()
ax1.plot(xs, ys)
ani = animation.FuncAnimation(fig, animate, interval=10)
plt.show()
|
flexible
|
{
"blob_id": "a4d5064decdc9963dae1712c7c6918b3e5902bf2",
"index": 9825,
"step-1": "<mask token>\n\n\ndef recieve_data():\n while True:\n data = conn.recv(1024)\n if not data:\n break\n conn.sendall(data)\n msg = pickle.loads(data)\n time = float(msg[0])\n gain = float(msg[1])\n yield time, gain\n conn.close()\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n xs.append(float(x))\n ys.append(float(y))\n else:\n break\n print(xs, ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\n\n<mask token>\n",
"step-2": "<mask token>\ns.bind((HOST, PORT))\ns.listen(5)\n<mask token>\nax1.set_ylim(-0.1, 1.1)\nax1.set_xlim(0, 2)\n\n\ndef recieve_data():\n while True:\n data = conn.recv(1024)\n if not data:\n break\n conn.sendall(data)\n msg = pickle.loads(data)\n time = float(msg[0])\n gain = float(msg[1])\n yield time, gain\n conn.close()\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n xs.append(float(x))\n ys.append(float(y))\n else:\n break\n print(xs, ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\n\n<mask token>\nplt.show()\n",
"step-3": "<mask token>\ntime_list = []\ngain_list = []\nHOST = '127.0.0.1'\nPORT = 65432\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen(5)\nconn, addr = s.accept()\nfig, ax1 = plt.subplots()\nax1.set_ylim(-0.1, 1.1)\nax1.set_xlim(0, 2)\n\n\ndef recieve_data():\n while True:\n data = conn.recv(1024)\n if not data:\n break\n conn.sendall(data)\n msg = pickle.loads(data)\n time = float(msg[0])\n gain = float(msg[1])\n yield time, gain\n conn.close()\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n xs.append(float(x))\n ys.append(float(y))\n else:\n break\n print(xs, ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\n\nani = animation.FuncAnimation(fig, animate, interval=10)\nplt.show()\n",
"step-4": "import socket\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib import style\nimport pickle\ntime_list = []\ngain_list = []\nHOST = '127.0.0.1'\nPORT = 65432\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen(5)\nconn, addr = s.accept()\nfig, ax1 = plt.subplots()\nax1.set_ylim(-0.1, 1.1)\nax1.set_xlim(0, 2)\n\n\ndef recieve_data():\n while True:\n data = conn.recv(1024)\n if not data:\n break\n conn.sendall(data)\n msg = pickle.loads(data)\n time = float(msg[0])\n gain = float(msg[1])\n yield time, gain\n conn.close()\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n xs.append(float(x))\n ys.append(float(y))\n else:\n break\n print(xs, ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\n\nani = animation.FuncAnimation(fig, animate, interval=10)\nplt.show()\n",
"step-5": "#!/usr/bin/env python\n\nimport socket\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib import style\nimport pickle\n# Create figure for plotting\n\ntime_list = []\ngain_list = []\n\nHOST = '127.0.0.1' # Standard loopback interface address (localhost)\nPORT = 65432 # Port to listen on (non-privileged ports are > 1023)\n\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\ns.listen(5)\nconn, addr = s.accept()\n\n\nfig, ax1 = plt.subplots()\nax1.set_ylim(-.1, 1.1)\nax1.set_xlim(0, 2)\n\ndef recieve_data():\n\twhile True:\n\t\t data = conn.recv(1024)\n\t\t if not data:\n\t\t\t break\n\t\t conn.sendall(data)\n\t\t msg = pickle.loads(data)\n\t\t time = float(msg[0])\n\t\t gain = float(msg[1])\n\t\t yield time , gain\n\tconn.close()\n\n\n\ndef animate(i):\n xs = []\n ys = []\n for line in recieve_data():\n if len(xs) < 50:\n x, y = line\n #print(x,y)\n xs.append(float(x))\n ys.append(float(y))\n else:break\n print(xs,ys)\n ax1.clear()\n ax1.plot(xs, ys)\n\nani = animation.FuncAnimation(fig, animate, interval=10)\nplt.show()\n\n\n\n\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
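# For each named grid, report the length of the longest path of strictly
# increasing values across 4-adjacent cells.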
def get_value(li, row, column):
if row < 0 or column < 0:
return 0
try:
return li[row][column]
except IndexError:
return 0
n = int(input())
results = {}
for asdf in range(n):
table = []
title, rows, columns = input().split()
rows = int(rows)
columns = int(columns)
for r in range(rows):
table.append([int(x) for x in input().split()])
flattened = [j for sub in table for j in sub]
sort = sorted(range(len(flattened)), key=lambda k: flattened[k])
distance = [[0 for i in range(columns)] for j in range(rows)]
#print(sort)
maxdist = 0
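    # Cells are visited in ascending value order, so any strictly smaller
    # neighbour already holds its final distance; equal-valued neighbours are
    # masked out via the w/x/y/z flags below.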
for i in sort:
r = i//columns
c = i % columns
#print(r)
#print(c)
w = 1
x = 1
y = 1
z = 1
if get_value(table, r, c) == get_value(table, r-1, c):
w = 0
if get_value(table, r, c) == get_value(table, r+1, c):
x = 0
if get_value(table, r, c) == get_value(table, r, c-1):
y = 0
if get_value(table, r, c) == get_value(table, r, c+1):
z = 0
#print(distance)
distance[r][c] = max(max(get_value(distance, r-1, c)*w, get_value(distance, r+1, c)*x),
max(get_value(distance, r, c-1)*y, get_value(distance, r, c+1)*z)) + 1
if distance[r][c] > maxdist:
maxdist = distance[r][c]
results[title] = maxdist
for key in results:
print(key + ": " + str(results[key]))
|
normal
|
{
"blob_id": "badbfdbdeb8b4fd40b1c44bf7dcff6457a0c8795",
"index": 7162,
"step-1": "<mask token>\n",
"step-2": "def get_value(li, row, column):\n if row < 0 or column < 0:\n return 0\n try:\n return li[row][column]\n except IndexError:\n return 0\n\n\n<mask token>\n",
"step-3": "def get_value(li, row, column):\n if row < 0 or column < 0:\n return 0\n try:\n return li[row][column]\n except IndexError:\n return 0\n\n\n<mask token>\nfor asdf in range(n):\n table = []\n title, rows, columns = input().split()\n rows = int(rows)\n columns = int(columns)\n for r in range(rows):\n table.append([int(x) for x in input().split()])\n flattened = [j for sub in table for j in sub]\n sort = sorted(range(len(flattened)), key=lambda k: flattened[k])\n distance = [[(0) for i in range(columns)] for j in range(rows)]\n maxdist = 0\n for i in sort:\n r = i // columns\n c = i % columns\n w = 1\n x = 1\n y = 1\n z = 1\n if get_value(table, r, c) == get_value(table, r - 1, c):\n w = 0\n if get_value(table, r, c) == get_value(table, r + 1, c):\n x = 0\n if get_value(table, r, c) == get_value(table, r, c - 1):\n y = 0\n if get_value(table, r, c) == get_value(table, r, c + 1):\n z = 0\n distance[r][c] = max(max(get_value(distance, r - 1, c) * w, \n get_value(distance, r + 1, c) * x), max(get_value(distance, r, \n c - 1) * y, get_value(distance, r, c + 1) * z)) + 1\n if distance[r][c] > maxdist:\n maxdist = distance[r][c]\n results[title] = maxdist\nfor key in results:\n print(key + ': ' + str(results[key]))\n",
"step-4": "def get_value(li, row, column):\n if row < 0 or column < 0:\n return 0\n try:\n return li[row][column]\n except IndexError:\n return 0\n\n\nn = int(input())\nresults = {}\nfor asdf in range(n):\n table = []\n title, rows, columns = input().split()\n rows = int(rows)\n columns = int(columns)\n for r in range(rows):\n table.append([int(x) for x in input().split()])\n flattened = [j for sub in table for j in sub]\n sort = sorted(range(len(flattened)), key=lambda k: flattened[k])\n distance = [[(0) for i in range(columns)] for j in range(rows)]\n maxdist = 0\n for i in sort:\n r = i // columns\n c = i % columns\n w = 1\n x = 1\n y = 1\n z = 1\n if get_value(table, r, c) == get_value(table, r - 1, c):\n w = 0\n if get_value(table, r, c) == get_value(table, r + 1, c):\n x = 0\n if get_value(table, r, c) == get_value(table, r, c - 1):\n y = 0\n if get_value(table, r, c) == get_value(table, r, c + 1):\n z = 0\n distance[r][c] = max(max(get_value(distance, r - 1, c) * w, \n get_value(distance, r + 1, c) * x), max(get_value(distance, r, \n c - 1) * y, get_value(distance, r, c + 1) * z)) + 1\n if distance[r][c] > maxdist:\n maxdist = distance[r][c]\n results[title] = maxdist\nfor key in results:\n print(key + ': ' + str(results[key]))\n",
"step-5": "def get_value(li, row, column):\r\n if row < 0 or column < 0:\r\n return 0\r\n try:\r\n return li[row][column]\r\n except IndexError:\r\n return 0\r\n\r\n\r\nn = int(input())\r\nresults = {}\r\nfor asdf in range(n):\r\n table = []\r\n title, rows, columns = input().split()\r\n rows = int(rows)\r\n columns = int(columns)\r\n\r\n for r in range(rows):\r\n table.append([int(x) for x in input().split()])\r\n\r\n flattened = [j for sub in table for j in sub]\r\n\r\n sort = sorted(range(len(flattened)), key=lambda k: flattened[k])\r\n\r\n distance = [[0 for i in range(columns)] for j in range(rows)]\r\n #print(sort)\r\n maxdist = 0\r\n for i in sort:\r\n r = i//columns\r\n c = i % columns\r\n #print(r)\r\n #print(c)\r\n w = 1\r\n x = 1\r\n y = 1\r\n z = 1\r\n if get_value(table, r, c) == get_value(table, r-1, c):\r\n w = 0\r\n if get_value(table, r, c) == get_value(table, r+1, c):\r\n x = 0\r\n if get_value(table, r, c) == get_value(table, r, c-1):\r\n y = 0\r\n if get_value(table, r, c) == get_value(table, r, c+1):\r\n z = 0\r\n #print(distance)\r\n distance[r][c] = max(max(get_value(distance, r-1, c)*w, get_value(distance, r+1, c)*x),\r\n max(get_value(distance, r, c-1)*y, get_value(distance, r, c+1)*z)) + 1\r\n if distance[r][c] > maxdist:\r\n maxdist = distance[r][c]\r\n results[title] = maxdist\r\n\r\nfor key in results:\r\n print(key + \": \" + str(results[key])) \r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Blueprint
application_vue_demo = Blueprint('application_vue_demo', __name__)
from . import views
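# views is imported after the Blueprint is created so the view module can import
# `application_vue_demo` without a circular import.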
|
normal
|
{
"blob_id": "a33abd253288140f8051aced1d0ed1e41b2fc786",
"index": 8067,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napplication_vue_demo = Blueprint('application_vue_demo', __name__)\n<mask token>\n",
"step-3": "from flask import Blueprint\napplication_vue_demo = Blueprint('application_vue_demo', __name__)\nfrom . import views\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):
model = Sequential()
act_fn = 'relu'
if len(layer_dims) == 0:
layer_dims = [10, 5, 0.2]
else:
layer_dims = [float(d) for d in layer_dims.split('-')]
model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,
kernel_initializer='normal'))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
for layer_dim in layer_dims[1:-1]:
model.add(Dense(int(num_feat * layer_dim)))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
model.add(Dense(int(num_feat * layer_dims[-1])))
model.add(Activation(act_fn))
model.add(Dropout(drop_out))
model.add(Dense(1))
adam = Adam(lr=lr)
model.compile(loss='logcosh', optimizer=adam)
return model
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(seed)
def get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):
model = Sequential()
act_fn = 'relu'
if len(layer_dims) == 0:
layer_dims = [10, 5, 0.2]
else:
layer_dims = [float(d) for d in layer_dims.split('-')]
model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,
kernel_initializer='normal'))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
for layer_dim in layer_dims[1:-1]:
model.add(Dense(int(num_feat * layer_dim)))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
model.add(Dense(int(num_feat * layer_dims[-1])))
model.add(Activation(act_fn))
model.add(Dropout(drop_out))
model.add(Dense(1))
adam = Adam(lr=lr)
model.compile(loss='logcosh', optimizer=adam)
return model
<|reserved_special_token_0|>
def generate_training_input(mol_file):
"""
:param mol_file: str
:return: pd.DataFrame
"""
ifs = oechem.oemolistream(mol_file)
training_data = []
for mol in ifs.GetOEGraphMols():
energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))
sf_elements = get_sf_elements(mol)
dihe_inchi = get_dihedral_inchi_key(mol)
data = [dihe_inchi, energy]
data.extend(sf_elements)
training_data.append(data)
ifs.close()
columns = [INCHI_KEY, ENERGY_KEY]
num_sf_elements = len(training_data[0]) - 2
sf_columns = [('sf_%d' % (i + 1)) for i in range(num_sf_elements)]
columns.extend(sf_columns)
df = pd.DataFrame(training_data, columns=columns)
grouped = df.loc[:, [INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)
df2 = grouped.transform(lambda x: x - x.min())
df[ENERGY_KEY] = df2[ENERGY_KEY]
return df
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Train neural network model to predict torsional relative energy')
parser.add_argument('--input', type=str, help=
        'sd file containing MM structures along with sd properties with torsion atom indices and QM energy'
)
parser.add_argument('--num_epoch', default=5000, type=int, help=
        'number of epochs (default: 5000)')
parser.add_argument('--batch_size', default=256, type=int, help=
'batch size (default: 256)')
parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str,
help='layer dimensions')
parser.add_argument('--lr', default=0.0001, type=float, help=
        'learning rate (default: 1e-4)')
parser.add_argument('--dropout', default=0.2, type=float, help=
'dropout (default: 0.2)')
parser.add_argument('--val_split', default=0.1, type=float, help=
'validation split (default: 0.1)')
parser.add_argument('--scalar', default='scaler.pkl', type=str, help=
'output file with standard scaler')
parser.add_argument('--model', default='model.h5', type=str, help=
'output file with trained model')
parser.add_argument('-v', '--verbose', action='count', default=0)
args = parser.parse_args()
input_file = args.input
num_epoch = args.num_epoch
batch_size = args.batch_size
lr = args.lr
dropout = args.dropout
layer_dims = args.layer_dims
df = generate_training_input(input_file)
tmp_idx = df.ENERGY > 30
df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])
dihe_inchis = df[INCHI_KEY].unique()
print('Number of profiles: %d' % len(dihe_inchis))
desc_bgn_idx = df.columns.get_loc('sf_1')
Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])
ytrain = df.ENERGY
scaler = StandardScaler().fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
print('Xtrain.shape ', Xtrain.shape)
with open(args.scalar, 'wb') as fptr:
pickle.dump(scaler, fptr)
_, num_feat = Xtrain.shape
earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience
=100, verbose=1, mode='auto')
model_file = args.model
model = get_model(num_feat, lr, dropout, layer_dims)
print(model.summary())
checkpointer = ModelCheckpoint(filepath=model_file, verbose=1,
save_best_only=True)
callbacks_list = [checkpointer]
model.fit(Xtrain, ytrain, epochs=num_epoch, batch_size=batch_size,
validation_split=args.val_split, callbacks=callbacks_list, verbose=1)
print('Training complete')
print('Standard scalar is saved in %s' % args.scalar)
print('Model is saved in %s' % args.model)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
seed = 7
np.random.seed(seed)
def get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):
model = Sequential()
act_fn = 'relu'
if len(layer_dims) == 0:
layer_dims = [10, 5, 0.2]
else:
layer_dims = [float(d) for d in layer_dims.split('-')]
model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,
kernel_initializer='normal'))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
for layer_dim in layer_dims[1:-1]:
model.add(Dense(int(num_feat * layer_dim)))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
model.add(Dense(int(num_feat * layer_dims[-1])))
model.add(Activation(act_fn))
model.add(Dropout(drop_out))
model.add(Dense(1))
adam = Adam(lr=lr)
model.compile(loss='logcosh', optimizer=adam)
return model
ENERGY_KEY = 'ENERGY'
INCHI_KEY = 'Inchi'
def generate_training_input(mol_file):
"""
:param mol_file: str
:return: pd.DataFrame
"""
ifs = oechem.oemolistream(mol_file)
training_data = []
for mol in ifs.GetOEGraphMols():
energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))
sf_elements = get_sf_elements(mol)
dihe_inchi = get_dihedral_inchi_key(mol)
data = [dihe_inchi, energy]
data.extend(sf_elements)
training_data.append(data)
ifs.close()
columns = [INCHI_KEY, ENERGY_KEY]
num_sf_elements = len(training_data[0]) - 2
sf_columns = [('sf_%d' % (i + 1)) for i in range(num_sf_elements)]
columns.extend(sf_columns)
df = pd.DataFrame(training_data, columns=columns)
grouped = df.loc[:, [INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)
df2 = grouped.transform(lambda x: x - x.min())
df[ENERGY_KEY] = df2[ENERGY_KEY]
return df
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Train neural network model to predict torsional relative energy')
parser.add_argument('--input', type=str, help=
        'sd file containing MM structures along with sd properties with torsion atom indices and QM energy'
)
parser.add_argument('--num_epoch', default=5000, type=int, help=
        'number of epochs (default: 5000)')
parser.add_argument('--batch_size', default=256, type=int, help=
'batch size (default: 256)')
parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str,
help='layer dimensions')
parser.add_argument('--lr', default=0.0001, type=float, help=
        'learning rate (default: 1e-4)')
parser.add_argument('--dropout', default=0.2, type=float, help=
'dropout (default: 0.2)')
parser.add_argument('--val_split', default=0.1, type=float, help=
'validation split (default: 0.1)')
parser.add_argument('--scalar', default='scaler.pkl', type=str, help=
'output file with standard scaler')
parser.add_argument('--model', default='model.h5', type=str, help=
'output file with trained model')
parser.add_argument('-v', '--verbose', action='count', default=0)
args = parser.parse_args()
input_file = args.input
num_epoch = args.num_epoch
batch_size = args.batch_size
lr = args.lr
dropout = args.dropout
layer_dims = args.layer_dims
df = generate_training_input(input_file)
tmp_idx = df.ENERGY > 30
df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])
dihe_inchis = df[INCHI_KEY].unique()
print('Number of profiles: %d' % len(dihe_inchis))
desc_bgn_idx = df.columns.get_loc('sf_1')
Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])
ytrain = df.ENERGY
scaler = StandardScaler().fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
print('Xtrain.shape ', Xtrain.shape)
with open(args.scalar, 'wb') as fptr:
pickle.dump(scaler, fptr)
_, num_feat = Xtrain.shape
earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience
=100, verbose=1, mode='auto')
model_file = args.model
model = get_model(num_feat, lr, dropout, layer_dims)
print(model.summary())
checkpointer = ModelCheckpoint(filepath=model_file, verbose=1,
save_best_only=True)
callbacks_list = [checkpointer]
model.fit(Xtrain, ytrain, epochs=num_epoch, batch_size=batch_size,
validation_split=args.val_split, callbacks=callbacks_list, verbose=1)
print('Training complete')
print('Standard scalar is saved in %s' % args.scalar)
print('Model is saved in %s' % args.model)
<|reserved_special_token_1|>
import os, sys
import math
import argparse
import shutil
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers import Dense, Dropout, LocallyConnected1D, Activation, GaussianNoise, GaussianDropout
from keras.layers.normalization import BatchNormalization
from keras.wrappers.scikit_learn import KerasRegressor
from keras.utils import multi_gpu_model
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import Callback
import timeit
import pickle
from openeye import oechem
from torsion.model import get_sf_elements
from torsion.analysis import get_dihedral_inchi_key
import matplotlib.pyplot as plt
seed = 7
np.random.seed(seed)
def get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):
model = Sequential()
act_fn = 'relu'
if len(layer_dims) == 0:
layer_dims = [10, 5, 0.2]
else:
layer_dims = [float(d) for d in layer_dims.split('-')]
model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,
kernel_initializer='normal'))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
for layer_dim in layer_dims[1:-1]:
model.add(Dense(int(num_feat * layer_dim)))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
model.add(Dense(int(num_feat * layer_dims[-1])))
model.add(Activation(act_fn))
model.add(Dropout(drop_out))
model.add(Dense(1))
adam = Adam(lr=lr)
model.compile(loss='logcosh', optimizer=adam)
return model
ENERGY_KEY = 'ENERGY'
INCHI_KEY = 'Inchi'
def generate_training_input(mol_file):
"""
:param mol_file: str
:return: pd.DataFrame
"""
ifs = oechem.oemolistream(mol_file)
training_data = []
for mol in ifs.GetOEGraphMols():
energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))
sf_elements = get_sf_elements(mol)
dihe_inchi = get_dihedral_inchi_key(mol)
data = [dihe_inchi, energy]
data.extend(sf_elements)
training_data.append(data)
ifs.close()
columns = [INCHI_KEY, ENERGY_KEY]
num_sf_elements = len(training_data[0]) - 2
sf_columns = [('sf_%d' % (i + 1)) for i in range(num_sf_elements)]
columns.extend(sf_columns)
df = pd.DataFrame(training_data, columns=columns)
grouped = df.loc[:, [INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)
df2 = grouped.transform(lambda x: x - x.min())
df[ENERGY_KEY] = df2[ENERGY_KEY]
return df
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Train neural network model to predict torsional relative energy')
parser.add_argument('--input', type=str, help=
        'sd file containing MM structures along with sd properties with torsion atom indices and QM energy'
)
parser.add_argument('--num_epoch', default=5000, type=int, help=
        'number of epochs (default: 5000)')
parser.add_argument('--batch_size', default=256, type=int, help=
'batch size (default: 256)')
parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str,
help='layer dimensions')
parser.add_argument('--lr', default=0.0001, type=float, help=
        'learning rate (default: 1e-4)')
parser.add_argument('--dropout', default=0.2, type=float, help=
'dropout (default: 0.2)')
parser.add_argument('--val_split', default=0.1, type=float, help=
'validation split (default: 0.1)')
parser.add_argument('--scalar', default='scaler.pkl', type=str, help=
'output file with standard scaler')
parser.add_argument('--model', default='model.h5', type=str, help=
'output file with trained model')
parser.add_argument('-v', '--verbose', action='count', default=0)
args = parser.parse_args()
input_file = args.input
num_epoch = args.num_epoch
batch_size = args.batch_size
lr = args.lr
dropout = args.dropout
layer_dims = args.layer_dims
df = generate_training_input(input_file)
tmp_idx = df.ENERGY > 30
df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])
dihe_inchis = df[INCHI_KEY].unique()
print('Number of profiles: %d' % len(dihe_inchis))
desc_bgn_idx = df.columns.get_loc('sf_1')
Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])
ytrain = df.ENERGY
scaler = StandardScaler().fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
print('Xtrain.shape ', Xtrain.shape)
with open(args.scalar, 'wb') as fptr:
pickle.dump(scaler, fptr)
_, num_feat = Xtrain.shape
earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience
=100, verbose=1, mode='auto')
model_file = args.model
model = get_model(num_feat, lr, dropout, layer_dims)
print(model.summary())
checkpointer = ModelCheckpoint(filepath=model_file, verbose=1,
save_best_only=True)
callbacks_list = [checkpointer]
model.fit(Xtrain, ytrain, epochs=num_epoch, batch_size=batch_size,
validation_split=args.val_split, callbacks=callbacks_list, verbose=1)
print('Training complete')
print('Standard scalar is saved in %s' % args.scalar)
print('Model is saved in %s' % args.model)
<|reserved_special_token_1|>
import os, sys
import math
import argparse
import shutil
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers import Dense, Dropout, LocallyConnected1D, Activation, \
GaussianNoise, GaussianDropout
from keras.layers.normalization import BatchNormalization
from keras.wrappers.scikit_learn import KerasRegressor
from keras.utils import multi_gpu_model
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import Callback
import timeit
import pickle
from openeye import oechem
from torsion.model import get_sf_elements
from torsion.analysis import get_dihedral_inchi_key
import matplotlib.pyplot as plt
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
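# Fully connected regression net: hidden widths are num_feat scaled by the
# '-'-separated factors in layer_dims; trained with log-cosh loss and Adam.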
def get_model(num_feat=294, lr=1e-3, drop_out=0.1, layer_dims=''):
model = Sequential()
act_fn = 'relu'
if len(layer_dims) == 0:
layer_dims = [10, 5, 0.2]
else:
layer_dims = [float(d) for d in layer_dims.split('-')]
model.add(
Dense(
int(num_feat * layer_dims[0]), input_dim=num_feat,
kernel_initializer='normal'))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
for layer_dim in layer_dims[1:-1]:
model.add(Dense(int(num_feat * layer_dim)))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
model.add(Dense(int(num_feat * layer_dims[-1])))
model.add(Activation(act_fn))
model.add(Dropout(drop_out))
model.add(Dense(1))
adam = Adam(lr=lr)
model.compile(loss='logcosh', optimizer=adam)
return model
ENERGY_KEY = 'ENERGY'
INCHI_KEY = 'Inchi'
def generate_training_input(mol_file):
    '''Build the training DataFrame from an SD file: per-conformer symmetry-function
    elements plus the QM energy (SD tag "ENERGY"), converted to relative energies
    within each dihedral InChI group.

    :param mol_file: str
    :return: pd.DataFrame
    '''
ifs = oechem.oemolistream(mol_file)
training_data = []
for mol in ifs.GetOEGraphMols():
energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))
sf_elements = get_sf_elements(mol)
dihe_inchi = get_dihedral_inchi_key(mol)
data = [dihe_inchi, energy]
data.extend(sf_elements)
training_data.append(data)
ifs.close()
columns = [INCHI_KEY, ENERGY_KEY]
num_sf_elements = len(training_data[0]) - 2
sf_columns = ['sf_%d'%(i+1) for i in range(num_sf_elements)]
columns.extend(sf_columns)
df = pd.DataFrame(training_data, columns=columns)
# calculate relative energy for each profile
grouped = df.loc[:,[INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)
df2 = grouped.transform(lambda x: x - x.min())
df[ENERGY_KEY] = df2[ENERGY_KEY]
return df
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Train neural network model to predict torsional relative energy')
    parser.add_argument('--input', type=str, help='sd file containing MM structures along with '
                                              'sd properties with torsion atom indices and QM energy')
    parser.add_argument('--num_epoch', default=5000, type=int, help='number of epochs (default: 5000)')
parser.add_argument('--batch_size', default=256, type=int, help='batch size (default: 256)')
parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str, help='layer dimensions')
    parser.add_argument('--lr', default=0.0001, type=float, help='learning rate (default: 1e-4)')
parser.add_argument('--dropout', default=0.2, type=float, help='dropout (default: 0.2)')
parser.add_argument('--val_split', default=0.1, type=float, help='validation split (default: 0.1)')
parser.add_argument('--scalar', default='scaler.pkl', type=str, help='output file with standard scaler')
parser.add_argument('--model', default='model.h5', type=str, help='output file with trained model')
parser.add_argument('-v', '--verbose', action='count', default=0)
args = parser.parse_args()
input_file = args.input
num_epoch = args.num_epoch
batch_size = args.batch_size
lr = args.lr
dropout = args.dropout
layer_dims = args.layer_dims
# generate training data using the molecules in the input file
# for each molecule in the input file, extract the QM energy from SD property "ENERGY"
# and generate symmetry function elements around the specified torsion (SD property "TORSION_ATOMS_FRAGMENT")
df = generate_training_input(input_file)
# cap the relative energy
tmp_idx = df.ENERGY > 30
df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])
dihe_inchis = df[INCHI_KEY].unique()
print('Number of profiles: %d' % len(dihe_inchis))
desc_bgn_idx = df.columns.get_loc('sf_1')
Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])
ytrain = df.ENERGY
# feature transformation
scaler = StandardScaler().fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
print('Xtrain.shape ', Xtrain.shape)
# save feature transformation
with open(args.scalar, 'wb') as fptr:
pickle.dump(scaler, fptr)
_, num_feat = Xtrain.shape
# early stopping criteria
earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=100, \
verbose=1, mode='auto')
model_file = args.model
# create DNN model
model = get_model(num_feat, lr, dropout, layer_dims)
print(model.summary())
checkpointer = ModelCheckpoint(
filepath=model_file, verbose=1, save_best_only=True)
callbacks_list = [checkpointer]
# train DNN model
model.fit(
Xtrain,
ytrain,
epochs=num_epoch,
batch_size=batch_size,
validation_split=args.val_split,
callbacks=callbacks_list,
verbose=1)
print('Training complete')
print('Standard scalar is saved in %s' % args.scalar)
print('Model is saved in %s' % args.model)
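# Example invocation (file names below are illustrative only):
#   python train_torsion_model.py --input torsion_fragments.sdf --num_epoch 2000 \
#       --batch_size 256 --lr 1e-4 --model model.h5 --scalar scaler.pkl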
|
flexible
|
{
"blob_id": "ed35a9bc3dd267c9a5fe76ccbb1b4ac5261fc3c8",
"index": 1993,
"step-1": "<mask token>\n\n\ndef get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n model.add(Dense(1))\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(seed)\n\n\ndef get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n model.add(Dense(1))\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n return model\n\n\n<mask token>\n\n\ndef generate_training_input(mol_file):\n \"\"\"\n\n\n :param mol_file: str\n :return: pd.DataFrame\n \"\"\"\n ifs = oechem.oemolistream(mol_file)\n training_data = []\n for mol in ifs.GetOEGraphMols():\n energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))\n sf_elements = get_sf_elements(mol)\n dihe_inchi = get_dihedral_inchi_key(mol)\n data = [dihe_inchi, energy]\n data.extend(sf_elements)\n training_data.append(data)\n ifs.close()\n columns = [INCHI_KEY, ENERGY_KEY]\n num_sf_elements = len(training_data[0]) - 2\n sf_columns = [('sf_%d' % (i + 1)) for i in range(num_sf_elements)]\n columns.extend(sf_columns)\n df = pd.DataFrame(training_data, columns=columns)\n grouped = df.loc[:, [INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)\n df2 = grouped.transform(lambda x: x - x.min())\n df[ENERGY_KEY] = df2[ENERGY_KEY]\n return df\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Train neural network model to predict torsional relative energy')\n parser.add_argument('--input', type=str, help=\n 'sd file containing MM structures alongwith sd properties with torsion atom indices and QM energy'\n )\n parser.add_argument('--num_epoch', default=5000, type=int, help=\n 'number of epoch (default = 2000)')\n parser.add_argument('--batch_size', default=256, type=int, help=\n 'batch size (default: 256)')\n parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str,\n help='layer dimensions')\n parser.add_argument('--lr', default=0.0001, type=float, help=\n 'learning rate (default: 1e-r)')\n parser.add_argument('--dropout', default=0.2, type=float, help=\n 'dropout (default: 0.2)')\n parser.add_argument('--val_split', default=0.1, type=float, help=\n 'validation split (default: 0.1)')\n parser.add_argument('--scalar', default='scaler.pkl', type=str, help=\n 'output file with standard scaler')\n parser.add_argument('--model', default='model.h5', type=str, help=\n 'output file with trained model')\n parser.add_argument('-v', '--verbose', action='count', default=0)\n args = parser.parse_args()\n input_file = args.input\n num_epoch = args.num_epoch\n batch_size = args.batch_size\n lr = args.lr\n dropout = args.dropout\n layer_dims = args.layer_dims\n df = generate_training_input(input_file)\n tmp_idx = df.ENERGY > 30\n df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])\n dihe_inchis = df[INCHI_KEY].unique()\n print('Number of profiles: %d' % len(dihe_inchis))\n desc_bgn_idx = df.columns.get_loc('sf_1')\n Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])\n ytrain = df.ENERGY\n scaler = StandardScaler().fit(Xtrain)\n Xtrain = scaler.transform(Xtrain)\n print('Xtrain.shape ', 
Xtrain.shape)\n with open(args.scalar, 'wb') as fptr:\n pickle.dump(scaler, fptr)\n _, num_feat = Xtrain.shape\n earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience\n =100, verbose=1, mode='auto')\n model_file = args.model\n model = get_model(num_feat, lr, dropout, layer_dims)\n print(model.summary())\n checkpointer = ModelCheckpoint(filepath=model_file, verbose=1,\n save_best_only=True)\n callbacks_list = [checkpointer]\n model.fit(Xtrain, ytrain, epochs=num_epoch, batch_size=batch_size,\n validation_split=args.val_split, callbacks=callbacks_list, verbose=1)\n print('Training complete')\n print('Standard scalar is saved in %s' % args.scalar)\n print('Model is saved in %s' % args.model)\n",
"step-3": "<mask token>\nseed = 7\nnp.random.seed(seed)\n\n\ndef get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n model.add(Dense(1))\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n return model\n\n\nENERGY_KEY = 'ENERGY'\nINCHI_KEY = 'Inchi'\n\n\ndef generate_training_input(mol_file):\n \"\"\"\n\n\n :param mol_file: str\n :return: pd.DataFrame\n \"\"\"\n ifs = oechem.oemolistream(mol_file)\n training_data = []\n for mol in ifs.GetOEGraphMols():\n energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))\n sf_elements = get_sf_elements(mol)\n dihe_inchi = get_dihedral_inchi_key(mol)\n data = [dihe_inchi, energy]\n data.extend(sf_elements)\n training_data.append(data)\n ifs.close()\n columns = [INCHI_KEY, ENERGY_KEY]\n num_sf_elements = len(training_data[0]) - 2\n sf_columns = [('sf_%d' % (i + 1)) for i in range(num_sf_elements)]\n columns.extend(sf_columns)\n df = pd.DataFrame(training_data, columns=columns)\n grouped = df.loc[:, [INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)\n df2 = grouped.transform(lambda x: x - x.min())\n df[ENERGY_KEY] = df2[ENERGY_KEY]\n return df\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Train neural network model to predict torsional relative energy')\n parser.add_argument('--input', type=str, help=\n 'sd file containing MM structures alongwith sd properties with torsion atom indices and QM energy'\n )\n parser.add_argument('--num_epoch', default=5000, type=int, help=\n 'number of epoch (default = 2000)')\n parser.add_argument('--batch_size', default=256, type=int, help=\n 'batch size (default: 256)')\n parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str,\n help='layer dimensions')\n parser.add_argument('--lr', default=0.0001, type=float, help=\n 'learning rate (default: 1e-r)')\n parser.add_argument('--dropout', default=0.2, type=float, help=\n 'dropout (default: 0.2)')\n parser.add_argument('--val_split', default=0.1, type=float, help=\n 'validation split (default: 0.1)')\n parser.add_argument('--scalar', default='scaler.pkl', type=str, help=\n 'output file with standard scaler')\n parser.add_argument('--model', default='model.h5', type=str, help=\n 'output file with trained model')\n parser.add_argument('-v', '--verbose', action='count', default=0)\n args = parser.parse_args()\n input_file = args.input\n num_epoch = args.num_epoch\n batch_size = args.batch_size\n lr = args.lr\n dropout = args.dropout\n layer_dims = args.layer_dims\n df = generate_training_input(input_file)\n tmp_idx = df.ENERGY > 30\n df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])\n dihe_inchis = df[INCHI_KEY].unique()\n print('Number of profiles: %d' % len(dihe_inchis))\n desc_bgn_idx = df.columns.get_loc('sf_1')\n Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])\n ytrain = df.ENERGY\n scaler = StandardScaler().fit(Xtrain)\n Xtrain = 
scaler.transform(Xtrain)\n print('Xtrain.shape ', Xtrain.shape)\n with open(args.scalar, 'wb') as fptr:\n pickle.dump(scaler, fptr)\n _, num_feat = Xtrain.shape\n earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience\n =100, verbose=1, mode='auto')\n model_file = args.model\n model = get_model(num_feat, lr, dropout, layer_dims)\n print(model.summary())\n checkpointer = ModelCheckpoint(filepath=model_file, verbose=1,\n save_best_only=True)\n callbacks_list = [checkpointer]\n model.fit(Xtrain, ytrain, epochs=num_epoch, batch_size=batch_size,\n validation_split=args.val_split, callbacks=callbacks_list, verbose=1)\n print('Training complete')\n print('Standard scalar is saved in %s' % args.scalar)\n print('Model is saved in %s' % args.model)\n",
"step-4": "import os, sys\nimport math\nimport argparse\nimport shutil\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import KFold\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LocallyConnected1D, Activation, GaussianNoise, GaussianDropout\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom keras.utils import multi_gpu_model\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam\nfrom keras.models import load_model\nfrom keras.callbacks import Callback\nimport timeit\nimport pickle\nfrom openeye import oechem\nfrom torsion.model import get_sf_elements\nfrom torsion.analysis import get_dihedral_inchi_key\nimport matplotlib.pyplot as plt\nseed = 7\nnp.random.seed(seed)\n\n\ndef get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n model.add(Dense(1))\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n return model\n\n\nENERGY_KEY = 'ENERGY'\nINCHI_KEY = 'Inchi'\n\n\ndef generate_training_input(mol_file):\n \"\"\"\n\n\n :param mol_file: str\n :return: pd.DataFrame\n \"\"\"\n ifs = oechem.oemolistream(mol_file)\n training_data = []\n for mol in ifs.GetOEGraphMols():\n energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))\n sf_elements = get_sf_elements(mol)\n dihe_inchi = get_dihedral_inchi_key(mol)\n data = [dihe_inchi, energy]\n data.extend(sf_elements)\n training_data.append(data)\n ifs.close()\n columns = [INCHI_KEY, ENERGY_KEY]\n num_sf_elements = len(training_data[0]) - 2\n sf_columns = [('sf_%d' % (i + 1)) for i in range(num_sf_elements)]\n columns.extend(sf_columns)\n df = pd.DataFrame(training_data, columns=columns)\n grouped = df.loc[:, [INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)\n df2 = grouped.transform(lambda x: x - x.min())\n df[ENERGY_KEY] = df2[ENERGY_KEY]\n return df\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Train neural network model to predict torsional relative energy')\n parser.add_argument('--input', type=str, help=\n 'sd file containing MM structures alongwith sd properties with torsion atom indices and QM energy'\n )\n parser.add_argument('--num_epoch', default=5000, type=int, help=\n 'number of epoch (default = 2000)')\n parser.add_argument('--batch_size', default=256, type=int, help=\n 'batch size (default: 256)')\n parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str,\n help='layer dimensions')\n parser.add_argument('--lr', default=0.0001, type=float, help=\n 'learning rate (default: 1e-r)')\n parser.add_argument('--dropout', default=0.2, type=float, help=\n 'dropout (default: 0.2)')\n parser.add_argument('--val_split', default=0.1, type=float, help=\n 
'validation split (default: 0.1)')\n parser.add_argument('--scalar', default='scaler.pkl', type=str, help=\n 'output file with standard scaler')\n parser.add_argument('--model', default='model.h5', type=str, help=\n 'output file with trained model')\n parser.add_argument('-v', '--verbose', action='count', default=0)\n args = parser.parse_args()\n input_file = args.input\n num_epoch = args.num_epoch\n batch_size = args.batch_size\n lr = args.lr\n dropout = args.dropout\n layer_dims = args.layer_dims\n df = generate_training_input(input_file)\n tmp_idx = df.ENERGY > 30\n df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])\n dihe_inchis = df[INCHI_KEY].unique()\n print('Number of profiles: %d' % len(dihe_inchis))\n desc_bgn_idx = df.columns.get_loc('sf_1')\n Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])\n ytrain = df.ENERGY\n scaler = StandardScaler().fit(Xtrain)\n Xtrain = scaler.transform(Xtrain)\n print('Xtrain.shape ', Xtrain.shape)\n with open(args.scalar, 'wb') as fptr:\n pickle.dump(scaler, fptr)\n _, num_feat = Xtrain.shape\n earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience\n =100, verbose=1, mode='auto')\n model_file = args.model\n model = get_model(num_feat, lr, dropout, layer_dims)\n print(model.summary())\n checkpointer = ModelCheckpoint(filepath=model_file, verbose=1,\n save_best_only=True)\n callbacks_list = [checkpointer]\n model.fit(Xtrain, ytrain, epochs=num_epoch, batch_size=batch_size,\n validation_split=args.val_split, callbacks=callbacks_list, verbose=1)\n print('Training complete')\n print('Standard scalar is saved in %s' % args.scalar)\n print('Model is saved in %s' % args.model)\n",
"step-5": "import os, sys\nimport math\nimport argparse\nimport shutil\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import KFold\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LocallyConnected1D, Activation, \\\n GaussianNoise, GaussianDropout\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom keras.utils import multi_gpu_model\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam\nfrom keras.models import load_model\nfrom keras.callbacks import Callback\n\nimport timeit\nimport pickle\n\nfrom openeye import oechem\n\nfrom torsion.model import get_sf_elements\nfrom torsion.analysis import get_dihedral_inchi_key\n\nimport matplotlib.pyplot as plt\n\n# fix random seed for reproducibility\nseed = 7\nnp.random.seed(seed)\n\n\ndef get_model(num_feat=294, lr=1e-3, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n\n model.add(\n Dense(\n int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n\n model.add(Dense(1))\n\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n\n return model\n\n\nENERGY_KEY = 'ENERGY'\nINCHI_KEY = 'Inchi'\n\ndef generate_training_input(mol_file):\n '''\n\n\n :param mol_file: str\n :return: pd.DataFrame\n '''\n ifs = oechem.oemolistream(mol_file)\n training_data = []\n for mol in ifs.GetOEGraphMols():\n energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))\n sf_elements = get_sf_elements(mol)\n dihe_inchi = get_dihedral_inchi_key(mol)\n\n data = [dihe_inchi, energy]\n data.extend(sf_elements)\n training_data.append(data)\n\n ifs.close()\n\n columns = [INCHI_KEY, ENERGY_KEY]\n num_sf_elements = len(training_data[0]) - 2\n sf_columns = ['sf_%d'%(i+1) for i in range(num_sf_elements)]\n columns.extend(sf_columns)\n\n df = pd.DataFrame(training_data, columns=columns)\n\n # calculate relative energy for each profile\n grouped = df.loc[:,[INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)\n df2 = grouped.transform(lambda x: x - x.min())\n df[ENERGY_KEY] = df2[ENERGY_KEY]\n\n return df\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Train neural network model to predict torsional relative energy')\n parser.add_argument('--input', type=str, help='sd file containing MM structures alongwith '\n 'sd properties with torsion atom indices and QM energy')\n parser.add_argument('--num_epoch', default=5000, type=int, help='number of epoch (default = 2000)')\n parser.add_argument('--batch_size', default=256, type=int, help='batch size (default: 256)')\n parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str, help='layer dimensions')\n parser.add_argument('--lr', default=0.0001, type=float, help='learning rate (default: 1e-r)')\n parser.add_argument('--dropout', default=0.2, 
type=float, help='dropout (default: 0.2)')\n parser.add_argument('--val_split', default=0.1, type=float, help='validation split (default: 0.1)')\n\n parser.add_argument('--scalar', default='scaler.pkl', type=str, help='output file with standard scaler')\n parser.add_argument('--model', default='model.h5', type=str, help='output file with trained model')\n\n parser.add_argument('-v', '--verbose', action='count', default=0)\n args = parser.parse_args()\n\n input_file = args.input\n\n num_epoch = args.num_epoch\n batch_size = args.batch_size\n lr = args.lr\n dropout = args.dropout\n layer_dims = args.layer_dims\n\n # generate training data using the molecules in the input file\n # for each molecule in the input file, extract the QM energy from SD property \"ENERGY\"\n # and generate symmetry function elements around the specified torsion (SD property \"TORSION_ATOMS_FRAGMENT\")\n df = generate_training_input(input_file)\n\n # cap the relative energy\n tmp_idx = df.ENERGY > 30\n df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])\n\n dihe_inchis = df[INCHI_KEY].unique()\n print('Number of profiles: %d' % len(dihe_inchis))\n\n desc_bgn_idx = df.columns.get_loc('sf_1')\n\n Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])\n ytrain = df.ENERGY\n\n # feature transformation\n scaler = StandardScaler().fit(Xtrain)\n Xtrain = scaler.transform(Xtrain)\n\n print('Xtrain.shape ', Xtrain.shape)\n\n # save feature transformation\n with open(args.scalar, 'wb') as fptr:\n pickle.dump(scaler, fptr)\n\n _, num_feat = Xtrain.shape\n\n # early stopping criteria\n earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=100, \\\n verbose=1, mode='auto')\n\n model_file = args.model\n # create DNN model\n model = get_model(num_feat, lr, dropout, layer_dims)\n\n print(model.summary())\n\n checkpointer = ModelCheckpoint(\n filepath=model_file, verbose=1, save_best_only=True)\n callbacks_list = [checkpointer]\n\n # train DNN model\n model.fit(\n Xtrain,\n ytrain,\n epochs=num_epoch,\n batch_size=batch_size,\n validation_split=args.val_split,\n callbacks=callbacks_list,\n verbose=1)\n\n print('Training complete')\n print('Standard scalar is saved in %s' % args.scalar)\n print('Model is saved in %s' % args.model)\n\n\n\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import hashlib
hash = 'yzbqklnj'
int = 0
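# Brute-force search for the smallest integer suffix whose MD5 digest starts with
# six zeros (note that 'hash' and 'int' shadow Python built-ins here).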
while not hashlib.md5("{}{}".format(hash, int).encode('utf-8')).hexdigest().startswith('000000'):
print("Nope luck for {}{}".format(hash, int))
int += 1
print("Key: {}{}".format(hash, int))
print("Number: {}").format(int)
|
normal
|
{
"blob_id": "9ae9fd6da5c3d519d87af699dd4ea9b564a53d79",
"index": 5481,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile not hashlib.md5('{}{}'.format(hash, int).encode('utf-8')).hexdigest(\n ).startswith('000000'):\n print('Nope luck for {}{}'.format(hash, int))\n int += 1\nprint('Key: {}{}'.format(hash, int))\nprint('Number: {}').format(int)\n",
"step-3": "<mask token>\nhash = 'yzbqklnj'\nint = 0\nwhile not hashlib.md5('{}{}'.format(hash, int).encode('utf-8')).hexdigest(\n ).startswith('000000'):\n print('Nope luck for {}{}'.format(hash, int))\n int += 1\nprint('Key: {}{}'.format(hash, int))\nprint('Number: {}').format(int)\n",
"step-4": "import hashlib\nhash = 'yzbqklnj'\nint = 0\nwhile not hashlib.md5('{}{}'.format(hash, int).encode('utf-8')).hexdigest(\n ).startswith('000000'):\n print('Nope luck for {}{}'.format(hash, int))\n int += 1\nprint('Key: {}{}'.format(hash, int))\nprint('Number: {}').format(int)\n",
"step-5": "import hashlib\n\nhash = 'yzbqklnj'\n\nint = 0\n\nwhile not hashlib.md5(\"{}{}\".format(hash, int).encode('utf-8')).hexdigest().startswith('000000'):\n print(\"Nope luck for {}{}\".format(hash, int))\n int += 1\n\nprint(\"Key: {}{}\".format(hash, int))\nprint(\"Number: {}\").format(int)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def logged_menu(logged_user):
print('Welcome you are logged in as: ' + logged_user.get_username())
while True:
command = input('{}@hackabank# '.format(logged_user.get_username()))
if command == 'info':
print('You are: ' + logged_user.get_username())
print('Your id is: ' + str(logged_user.get_id()))
print('Your balance is:' + str(logged_user.get_balance()) + '$')
elif command == 'changepass':
new_pass = input('Enter your new password: ')
sql_manager.change_pass(new_pass, logged_user)
elif command == 'change-message':
new_message = input('Enter your new message: ')
sql_manager.change_message(new_message, logged_user)
elif command == 'show-message':
print(logged_user.get_message())
elif command == 'help':
print('info - for showing account info')
print('changepass - for changing passowrd')
print('change-message - for changing users message')
print('show-message - for showing users message')
elif command in EXIT_CMD:
break
else:
print('Not such a command!')
continue
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main_menu():
print(
"""Welcome to our bank service. You are not logged in.
Please register or login"""
)
while True:
command = input('guest@hackabank$ ')
if command == 'register':
username = input('Enter your username: ')
password = getpass(prompt='Enter your password: ')
sql_manager.register(username, password)
print('Registration Successfull')
elif command == 'login':
username = input('Enter your username: ')
password = getpass(prompt='Enter your password: ')
logged_user = sql_manager.login(username, password)
if logged_user:
logged_menu(logged_user)
else:
print('Login failed')
continue
elif command == 'help':
print(
"""login - for logging in!
register - for creating new account!
exit - for closing program!"""
)
elif command == 'exit':
break
else:
print('Not a valid command')
continue
def logged_menu(logged_user):
print('Welcome you are logged in as: ' + logged_user.get_username())
while True:
command = input('{}@hackabank# '.format(logged_user.get_username()))
if command == 'info':
print('You are: ' + logged_user.get_username())
print('Your id is: ' + str(logged_user.get_id()))
print('Your balance is:' + str(logged_user.get_balance()) + '$')
elif command == 'changepass':
new_pass = input('Enter your new password: ')
sql_manager.change_pass(new_pass, logged_user)
elif command == 'change-message':
new_message = input('Enter your new message: ')
sql_manager.change_message(new_message, logged_user)
elif command == 'show-message':
print(logged_user.get_message())
elif command == 'help':
print('info - for showing account info')
print('changepass - for changing passowrd')
print('change-message - for changing users message')
print('show-message - for showing users message')
elif command in EXIT_CMD:
break
else:
print('Not such a command!')
continue
<|reserved_special_token_1|>
import sql_manager
import Client
from getpass import getpass
from settings import EXIT_CMD
def main_menu():
print(
"""Welcome to our bank service. You are not logged in.
Please register or login"""
)
while True:
command = input('guest@hackabank$ ')
if command == 'register':
username = input('Enter your username: ')
password = getpass(prompt='Enter your password: ')
sql_manager.register(username, password)
print('Registration Successfull')
elif command == 'login':
username = input('Enter your username: ')
password = getpass(prompt='Enter your password: ')
logged_user = sql_manager.login(username, password)
if logged_user:
logged_menu(logged_user)
else:
print('Login failed')
continue
elif command == 'help':
print(
"""login - for logging in!
register - for creating new account!
exit - for closing program!"""
)
elif command == 'exit':
break
else:
print('Not a valid command')
continue
def logged_menu(logged_user):
print('Welcome you are logged in as: ' + logged_user.get_username())
while True:
command = input('{}@hackabank# '.format(logged_user.get_username()))
if command == 'info':
print('You are: ' + logged_user.get_username())
print('Your id is: ' + str(logged_user.get_id()))
print('Your balance is:' + str(logged_user.get_balance()) + '$')
elif command == 'changepass':
new_pass = input('Enter your new password: ')
sql_manager.change_pass(new_pass, logged_user)
elif command == 'change-message':
new_message = input('Enter your new message: ')
sql_manager.change_message(new_message, logged_user)
elif command == 'show-message':
print(logged_user.get_message())
elif command == 'help':
print('info - for showing account info')
print('changepass - for changing passowrd')
print('change-message - for changing users message')
print('show-message - for showing users message')
elif command in EXIT_CMD:
break
else:
print('Not such a command!')
continue
<|reserved_special_token_1|>
#!/usr/bin/env python3
import sql_manager
import Client
from getpass import getpass
from settings import EXIT_CMD
def main_menu():
print("""Welcome to our bank service. You are not logged in.
Please register or login""")
while True:
command = input("guest@hackabank$ ")
if command == "register":
username = input("Enter your username: ")
password = getpass(prompt="Enter your password: ")
sql_manager.register(username, password)
print("Registration Successfull")
elif command == "login":
username = input("Enter your username: ")
password = getpass(prompt="Enter your password: ")
logged_user = sql_manager.login(username, password)
if logged_user:
logged_menu(logged_user)
else:
print("Login failed")
continue
elif command == "help":
print("""login - for logging in!
register - for creating new account!
exit - for closing program!""")
elif command == "exit":
break
else:
print("Not a valid command")
continue
def logged_menu(logged_user):
print("Welcome you are logged in as: " + logged_user.get_username())
while True:
command = input("{}@hackabank# ".format(logged_user.get_username()))
if command == "info":
print("You are: " + logged_user.get_username())
print("Your id is: " + str(logged_user.get_id()))
print("Your balance is:" + str(logged_user.get_balance()) + "$")
elif command == "changepass":
new_pass = input("Enter your new password: ")
sql_manager.change_pass(new_pass, logged_user)
elif command == "change-message":
new_message = input("Enter your new message: ")
sql_manager.change_message(new_message, logged_user)
elif command == "show-message":
print(logged_user.get_message())
elif command == "help":
print("info - for showing account info")
print("changepass - for changing passowrd")
print("change-message - for changing users message")
print("show-message - for showing users message")
elif command in EXIT_CMD:
break
else:
print("Not such a command!")
continue
|
flexible
|
{
"blob_id": "ee4fd4aef7ecdfbc8ff53028fdedc558814f46a7",
"index": 2383,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef logged_menu(logged_user):\n print('Welcome you are logged in as: ' + logged_user.get_username())\n while True:\n command = input('{}@hackabank# '.format(logged_user.get_username()))\n if command == 'info':\n print('You are: ' + logged_user.get_username())\n print('Your id is: ' + str(logged_user.get_id()))\n print('Your balance is:' + str(logged_user.get_balance()) + '$')\n elif command == 'changepass':\n new_pass = input('Enter your new password: ')\n sql_manager.change_pass(new_pass, logged_user)\n elif command == 'change-message':\n new_message = input('Enter your new message: ')\n sql_manager.change_message(new_message, logged_user)\n elif command == 'show-message':\n print(logged_user.get_message())\n elif command == 'help':\n print('info - for showing account info')\n print('changepass - for changing passowrd')\n print('change-message - for changing users message')\n print('show-message - for showing users message')\n elif command in EXIT_CMD:\n break\n else:\n print('Not such a command!')\n continue\n",
"step-3": "<mask token>\n\n\ndef main_menu():\n print(\n \"\"\"Welcome to our bank service. You are not logged in.\n Please register or login\"\"\"\n )\n while True:\n command = input('guest@hackabank$ ')\n if command == 'register':\n username = input('Enter your username: ')\n password = getpass(prompt='Enter your password: ')\n sql_manager.register(username, password)\n print('Registration Successfull')\n elif command == 'login':\n username = input('Enter your username: ')\n password = getpass(prompt='Enter your password: ')\n logged_user = sql_manager.login(username, password)\n if logged_user:\n logged_menu(logged_user)\n else:\n print('Login failed')\n continue\n elif command == 'help':\n print(\n \"\"\"login - for logging in!\n register - for creating new account!\n exit - for closing program!\"\"\"\n )\n elif command == 'exit':\n break\n else:\n print('Not a valid command')\n continue\n\n\ndef logged_menu(logged_user):\n print('Welcome you are logged in as: ' + logged_user.get_username())\n while True:\n command = input('{}@hackabank# '.format(logged_user.get_username()))\n if command == 'info':\n print('You are: ' + logged_user.get_username())\n print('Your id is: ' + str(logged_user.get_id()))\n print('Your balance is:' + str(logged_user.get_balance()) + '$')\n elif command == 'changepass':\n new_pass = input('Enter your new password: ')\n sql_manager.change_pass(new_pass, logged_user)\n elif command == 'change-message':\n new_message = input('Enter your new message: ')\n sql_manager.change_message(new_message, logged_user)\n elif command == 'show-message':\n print(logged_user.get_message())\n elif command == 'help':\n print('info - for showing account info')\n print('changepass - for changing passowrd')\n print('change-message - for changing users message')\n print('show-message - for showing users message')\n elif command in EXIT_CMD:\n break\n else:\n print('Not such a command!')\n continue\n",
"step-4": "import sql_manager\nimport Client\nfrom getpass import getpass\nfrom settings import EXIT_CMD\n\n\ndef main_menu():\n print(\n \"\"\"Welcome to our bank service. You are not logged in.\n Please register or login\"\"\"\n )\n while True:\n command = input('guest@hackabank$ ')\n if command == 'register':\n username = input('Enter your username: ')\n password = getpass(prompt='Enter your password: ')\n sql_manager.register(username, password)\n print('Registration Successfull')\n elif command == 'login':\n username = input('Enter your username: ')\n password = getpass(prompt='Enter your password: ')\n logged_user = sql_manager.login(username, password)\n if logged_user:\n logged_menu(logged_user)\n else:\n print('Login failed')\n continue\n elif command == 'help':\n print(\n \"\"\"login - for logging in!\n register - for creating new account!\n exit - for closing program!\"\"\"\n )\n elif command == 'exit':\n break\n else:\n print('Not a valid command')\n continue\n\n\ndef logged_menu(logged_user):\n print('Welcome you are logged in as: ' + logged_user.get_username())\n while True:\n command = input('{}@hackabank# '.format(logged_user.get_username()))\n if command == 'info':\n print('You are: ' + logged_user.get_username())\n print('Your id is: ' + str(logged_user.get_id()))\n print('Your balance is:' + str(logged_user.get_balance()) + '$')\n elif command == 'changepass':\n new_pass = input('Enter your new password: ')\n sql_manager.change_pass(new_pass, logged_user)\n elif command == 'change-message':\n new_message = input('Enter your new message: ')\n sql_manager.change_message(new_message, logged_user)\n elif command == 'show-message':\n print(logged_user.get_message())\n elif command == 'help':\n print('info - for showing account info')\n print('changepass - for changing passowrd')\n print('change-message - for changing users message')\n print('show-message - for showing users message')\n elif command in EXIT_CMD:\n break\n else:\n print('Not such a command!')\n continue\n",
"step-5": "#!/usr/bin/env python3\nimport sql_manager\nimport Client\nfrom getpass import getpass\nfrom settings import EXIT_CMD\n\n\ndef main_menu():\n print(\"\"\"Welcome to our bank service. You are not logged in.\n Please register or login\"\"\")\n\n while True:\n command = input(\"guest@hackabank$ \")\n\n if command == \"register\":\n username = input(\"Enter your username: \")\n password = getpass(prompt=\"Enter your password: \")\n sql_manager.register(username, password)\n print(\"Registration Successfull\")\n elif command == \"login\":\n username = input(\"Enter your username: \")\n password = getpass(prompt=\"Enter your password: \")\n logged_user = sql_manager.login(username, password)\n\n if logged_user:\n logged_menu(logged_user)\n else:\n print(\"Login failed\")\n continue\n\n elif command == \"help\":\n print(\"\"\"login - for logging in!\n register - for creating new account!\n exit - for closing program!\"\"\")\n\n elif command == \"exit\":\n break\n\n else:\n print(\"Not a valid command\")\n continue\n\n\ndef logged_menu(logged_user):\n print(\"Welcome you are logged in as: \" + logged_user.get_username())\n while True:\n command = input(\"{}@hackabank# \".format(logged_user.get_username()))\n\n if command == \"info\":\n print(\"You are: \" + logged_user.get_username())\n print(\"Your id is: \" + str(logged_user.get_id()))\n print(\"Your balance is:\" + str(logged_user.get_balance()) + \"$\")\n\n elif command == \"changepass\":\n new_pass = input(\"Enter your new password: \")\n sql_manager.change_pass(new_pass, logged_user)\n\n elif command == \"change-message\":\n new_message = input(\"Enter your new message: \")\n sql_manager.change_message(new_message, logged_user)\n\n elif command == \"show-message\":\n print(logged_user.get_message())\n\n elif command == \"help\":\n print(\"info - for showing account info\")\n print(\"changepass - for changing passowrd\")\n print(\"change-message - for changing users message\")\n print(\"show-message - for showing users message\")\n elif command in EXIT_CMD:\n break\n else:\n print(\"Not such a command!\")\n continue\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the storage format CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import storage_format
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
"""Tests for the storage format CLI arguments helper."""
# pylint: disable=no-member,protected-access
_EXPECTED_OUTPUT = """\
usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]
Test argument parser.
{0:s}:
--storage_format FORMAT, --storage-format FORMAT
Format of the storage file, the default is: sqlite.
Supported options: sqlite
--task_storage_format FORMAT, --task-storage-format FORMAT
Format for task storage, the default is: sqlite.
Supported options: redis, sqlite
""".format(cli_test_lib.ARGPARSE_OPTIONS)
def testAddArguments(self):
"""Tests the AddArguments function."""
argument_parser = argparse.ArgumentParser(
prog='cli_helper.py', description='Test argument parser.',
add_help=False,
formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)
storage_format.StorageFormatArgumentsHelper.AddArguments(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_OUTPUT)
def testParseOptions(self):
"""Tests the ParseOptions function."""
options = cli_test_lib.TestOptions()
options.storage_format = 'sqlite'
options.task_storage_format = 'sqlite'
test_tool = tools.CLITool()
storage_format.StorageFormatArgumentsHelper.ParseOptions(options, test_tool)
self.assertEqual(test_tool._storage_format, options.storage_format)
self.assertEqual(
test_tool._task_storage_format, options.task_storage_format)
with self.assertRaises(errors.BadConfigObject):
storage_format.StorageFormatArgumentsHelper.ParseOptions(options, None)
with self.assertRaises(errors.BadConfigOption):
options.storage_format = 'bogus'
storage_format.StorageFormatArgumentsHelper.ParseOptions(
options, test_tool)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "2075e7e05882524c295c8542ca7aefae2cf3e0fc",
"index": 5951,
"step-1": "<mask token>\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n <mask token>\n <mask token>\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(prog='cli_helper.py',\n description='Test argument parser.', add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n storage_format.StorageFormatArgumentsHelper.AddArguments(\n argument_parser)\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(test_tool._task_storage_format, options.\n task_storage_format)\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n None)\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n \"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n _EXPECTED_OUTPUT = (\n \"\"\"usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]\n\nTest argument parser.\n\n{0:s}:\n --storage_format FORMAT, --storage-format FORMAT\n Format of the storage file, the default is: sqlite.\n Supported options: sqlite\n --task_storage_format FORMAT, --task-storage-format FORMAT\n Format for task storage, the default is: sqlite.\n Supported options: redis, sqlite\n\"\"\"\n .format(cli_test_lib.ARGPARSE_OPTIONS))\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(prog='cli_helper.py',\n description='Test argument parser.', add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n storage_format.StorageFormatArgumentsHelper.AddArguments(\n argument_parser)\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(test_tool._task_storage_format, options.\n task_storage_format)\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n None)\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n \"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n _EXPECTED_OUTPUT = (\n \"\"\"usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]\n\nTest argument parser.\n\n{0:s}:\n --storage_format FORMAT, --storage-format FORMAT\n Format of the storage file, the default is: sqlite.\n Supported options: sqlite\n --task_storage_format FORMAT, --task-storage-format FORMAT\n Format for task storage, the default is: sqlite.\n Supported options: redis, sqlite\n\"\"\"\n .format(cli_test_lib.ARGPARSE_OPTIONS))\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(prog='cli_helper.py',\n description='Test argument parser.', add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n storage_format.StorageFormatArgumentsHelper.AddArguments(\n argument_parser)\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(test_tool._task_storage_format, options.\n task_storage_format)\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n None)\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport argparse\nimport unittest\nfrom plaso.cli import tools\nfrom plaso.cli.helpers import storage_format\nfrom plaso.lib import errors\nfrom tests.cli import test_lib as cli_test_lib\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n \"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n _EXPECTED_OUTPUT = (\n \"\"\"usage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]\n\nTest argument parser.\n\n{0:s}:\n --storage_format FORMAT, --storage-format FORMAT\n Format of the storage file, the default is: sqlite.\n Supported options: sqlite\n --task_storage_format FORMAT, --task-storage-format FORMAT\n Format for task storage, the default is: sqlite.\n Supported options: redis, sqlite\n\"\"\"\n .format(cli_test_lib.ARGPARSE_OPTIONS))\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(prog='cli_helper.py',\n description='Test argument parser.', add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n storage_format.StorageFormatArgumentsHelper.AddArguments(\n argument_parser)\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(test_tool._task_storage_format, options.\n task_storage_format)\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n None)\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options,\n test_tool)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n\nimport argparse\nimport unittest\n\nfrom plaso.cli import tools\nfrom plaso.cli.helpers import storage_format\nfrom plaso.lib import errors\n\nfrom tests.cli import test_lib as cli_test_lib\n\n\nclass StorageFormatArgumentsHelperTest(cli_test_lib.CLIToolTestCase):\n \"\"\"Tests for the storage format CLI arguments helper.\"\"\"\n\n # pylint: disable=no-member,protected-access\n\n _EXPECTED_OUTPUT = \"\"\"\\\nusage: cli_helper.py [--storage_format FORMAT] [--task_storage_format FORMAT]\n\nTest argument parser.\n\n{0:s}:\n --storage_format FORMAT, --storage-format FORMAT\n Format of the storage file, the default is: sqlite.\n Supported options: sqlite\n --task_storage_format FORMAT, --task-storage-format FORMAT\n Format for task storage, the default is: sqlite.\n Supported options: redis, sqlite\n\"\"\".format(cli_test_lib.ARGPARSE_OPTIONS)\n\n def testAddArguments(self):\n \"\"\"Tests the AddArguments function.\"\"\"\n argument_parser = argparse.ArgumentParser(\n prog='cli_helper.py', description='Test argument parser.',\n add_help=False,\n formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)\n\n storage_format.StorageFormatArgumentsHelper.AddArguments(argument_parser)\n\n output = self._RunArgparseFormatHelp(argument_parser)\n self.assertEqual(output, self._EXPECTED_OUTPUT)\n\n def testParseOptions(self):\n \"\"\"Tests the ParseOptions function.\"\"\"\n options = cli_test_lib.TestOptions()\n options.storage_format = 'sqlite'\n options.task_storage_format = 'sqlite'\n\n test_tool = tools.CLITool()\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options, test_tool)\n\n self.assertEqual(test_tool._storage_format, options.storage_format)\n self.assertEqual(\n test_tool._task_storage_format, options.task_storage_format)\n\n with self.assertRaises(errors.BadConfigObject):\n storage_format.StorageFormatArgumentsHelper.ParseOptions(options, None)\n\n with self.assertRaises(errors.BadConfigOption):\n options.storage_format = 'bogus'\n storage_format.StorageFormatArgumentsHelper.ParseOptions(\n options, test_tool)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Importex.atest()
<|reserved_special_token_1|>
import Importex
Importex.atest()
<|reserved_special_token_1|>
# Within the same directory, references and calls can still be imported and used directly.
# For cross-package use, the two approaches in TwoUsage are needed.
import Importex
Importex.atest()
|
flexible
|
{
"blob_id": "1a66e7f59ada43deb8e28b9806dc4fb9be4ae247",
"index": 5771,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nImportex.atest()\n",
"step-3": "import Importex\nImportex.atest()\n",
"step-4": "# 同一目录下的引用调用还是随意导入使用的\n# 跨包使用就需要使用TwoUsage里面的两种方式。\n\nimport Importex\n\nImportex.atest()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Tool:
def __init__(self, name, weight):
self.name = name
self.weight = weight
def __repr__(self):
return f'Tool({self.name!r},{self.weight})'
tools = [
Tool('수준계', 3.5),
Tool('해머', 1.25),
Tool('스크류드라이버', .5),
Tool('끌', .25)
]
print(repr(tools))
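# Sort by name length, longest first.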
tools.sort(reverse=True, key=lambda x: len(x.name))
print(tools)
|
normal
|
{
"blob_id": "173b8e66ead62e3aa70805e42e06ea05257d5ee2",
"index": 2965,
"step-1": "class Tool:\n <mask token>\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\n<mask token>\n",
"step-2": "class Tool:\n\n def __init__(self, name, weight):\n self.name = name\n self.weight = weight\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\n<mask token>\n",
"step-3": "class Tool:\n\n def __init__(self, name, weight):\n self.name = name\n self.weight = weight\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\n<mask token>\nprint(repr(tools))\ntools.sort(reverse=True, key=lambda x: len(x.name))\nprint(tools)\n",
"step-4": "class Tool:\n\n def __init__(self, name, weight):\n self.name = name\n self.weight = weight\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\ntools = [Tool('수준계', 3.5), Tool('해머', 1.25), Tool('스크류드라이버', 0.5), Tool('끌',\n 0.25)]\nprint(repr(tools))\ntools.sort(reverse=True, key=lambda x: len(x.name))\nprint(tools)\n",
"step-5": "class Tool:\n def __init__(self, name, weight):\n self.name = name\n self.weight = weight\n\n def __repr__(self):\n return f'Tool({self.name!r},{self.weight})'\n\n\ntools = [\n Tool('수준계', 3.5),\n Tool('해머', 1.25),\n Tool('스크류드라이버', .5),\n Tool('끌', .25)\n]\nprint(repr(tools))\ntools.sort(reverse=True, key=lambda x: len(x.name))\nprint(tools)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# What is the 10 001st prime number?
primes = [2]
def is_prime(a, primes):
    # Trial division: a is prime only if no prime found so far divides it.
    for x in primes:
        if a % x == 0:
            return False
    return True
a = 3
while len(primes) <= 10001:
# There's something faster than just checking all of them, but this
# will do for now.
if is_prime(a, primes):
primes.append(a)
print a
a += 1
print primes[10000]
|
normal
|
{
"blob_id": "e5e516b6a39a6df03f1e5f80fe2d9e3978e856aa",
"index": 2310,
"step-1": "# What is the 10 001st prime number?\n\nprimes = [2]\n\n\ndef is_prime(a, primes):\n b = a\n for x in primes:\n d, m = divmod(b, x)\n if m == 0:\n return False\n else:\n return True\n\n\na = 3\nwhile len(primes) <= 10001:\n # There's something faster than just checking all of them, but this\n # will do for now.\n if is_prime(a, primes):\n primes.append(a)\n print a\n a += 1\n\n\nprint primes[10000]\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
def filter_lines(in_filename, in_filename2, out_filename):
    """Collect the FIPS codes that appear in in_filename, then write to
    out_filename every record from in_filename2 whose FIPS code was not seen
    in the first file. Returns (rows read from in_filename2, rows written).
    """
proper_convert = 0
missing_convert = 0
fourteen_set = set()
with open(in_filename, 'r') as in_f, open(in_filename2, 'r') as in_f2, open(out_filename, 'w') as out_f:
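        # First pass: record every FIPS code that appears in the first (2014) file.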
for line in in_f:
vals = line.strip().split(",")
fips = vals[0]
if(fips not in fourteen_set):
fourteen_set.add(fips)
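        # Second pass: write rows from the second (2016) file whose FIPS code never
        # appeared in the first file, and count how many were written.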
for line in in_f2:
vals = line.strip().split(",")
fips = vals[0]
count = vals[1]
proper_convert += 1
if(fips not in fourteen_set):
new_line = str(fips)+","+str(count)+"\n"
out_f.write(new_line)
missing_convert += 1
return (proper_convert, missing_convert)
in_filename = "/Users/VamsiG/Music/2014_Data/FCC_Final_Output.csv"
in_filename1 = "/Users/VamsiG/Music/2016_Data/FCC_Final_Output.csv"
out_filename= "/Users/VamsiG/Music/FCC_Overlap_CompleteFips.csv"
counter1, new_vals1 = filter_lines(in_filename,in_filename1,out_filename)
print(counter1)
print(new_vals1)
|
normal
|
{
"blob_id": "502e0f0c6376617dc094fcdd47bea9773d011864",
"index": 900,
"step-1": "<mask token>\n",
"step-2": "def filter_lines(in_filename, in_filename2, out_filename):\n \"\"\"Read records from in_filename and write records to out_filename if\n the beginning of the line (taken up to the first comma at or after\n position 11) is found in keys (which must be a set of byte strings).\n\n \"\"\"\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r'\n ) as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(',')\n fips = vals[0]\n if fips not in fourteen_set:\n fourteen_set.add(fips)\n for line in in_f2:\n vals = line.strip().split(',')\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if fips not in fourteen_set:\n new_line = str(fips) + ',' + str(count) + '\\n'\n out_f.write(new_line)\n missing_convert += 1\n return proper_convert, missing_convert\n\n\n<mask token>\n",
"step-3": "def filter_lines(in_filename, in_filename2, out_filename):\n \"\"\"Read records from in_filename and write records to out_filename if\n the beginning of the line (taken up to the first comma at or after\n position 11) is found in keys (which must be a set of byte strings).\n\n \"\"\"\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r'\n ) as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(',')\n fips = vals[0]\n if fips not in fourteen_set:\n fourteen_set.add(fips)\n for line in in_f2:\n vals = line.strip().split(',')\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if fips not in fourteen_set:\n new_line = str(fips) + ',' + str(count) + '\\n'\n out_f.write(new_line)\n missing_convert += 1\n return proper_convert, missing_convert\n\n\n<mask token>\nprint(counter1)\nprint(new_vals1)\n",
"step-4": "def filter_lines(in_filename, in_filename2, out_filename):\n \"\"\"Read records from in_filename and write records to out_filename if\n the beginning of the line (taken up to the first comma at or after\n position 11) is found in keys (which must be a set of byte strings).\n\n \"\"\"\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r'\n ) as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(',')\n fips = vals[0]\n if fips not in fourteen_set:\n fourteen_set.add(fips)\n for line in in_f2:\n vals = line.strip().split(',')\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if fips not in fourteen_set:\n new_line = str(fips) + ',' + str(count) + '\\n'\n out_f.write(new_line)\n missing_convert += 1\n return proper_convert, missing_convert\n\n\nin_filename = '/Users/VamsiG/Music/2014_Data/FCC_Final_Output.csv'\nin_filename1 = '/Users/VamsiG/Music/2016_Data/FCC_Final_Output.csv'\nout_filename = '/Users/VamsiG/Music/FCC_Overlap_CompleteFips.csv'\ncounter1, new_vals1 = filter_lines(in_filename, in_filename1, out_filename)\nprint(counter1)\nprint(new_vals1)\n",
"step-5": "def filter_lines(in_filename, in_filename2,out_filename):\n \"\"\"Read records from in_filename and write records to out_filename if\n the beginning of the line (taken up to the first comma at or after\n position 11) is found in keys (which must be a set of byte strings).\n\n \"\"\"\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r') as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(\",\")\n fips = vals[0]\n if(fips not in fourteen_set):\n fourteen_set.add(fips)\n \n for line in in_f2:\n vals = line.strip().split(\",\")\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if(fips not in fourteen_set):\n new_line = str(fips)+\",\"+str(count)+\"\\n\"\n out_f.write(new_line)\n missing_convert += 1\n\n return (proper_convert, missing_convert)\n\nin_filename = \"/Users/VamsiG/Music/2014_Data/FCC_Final_Output.csv\"\nin_filename1 = \"/Users/VamsiG/Music/2016_Data/FCC_Final_Output.csv\"\nout_filename= \"/Users/VamsiG/Music/FCC_Overlap_CompleteFips.csv\"\n\ncounter1, new_vals1 = filter_lines(in_filename,in_filename1,out_filename)\nprint(counter1)\nprint(new_vals1)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class interface(kernel.service.service):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class interface(kernel.service.service):
def __init__(self, name):
self.name = name
<|reserved_special_token_1|>
import kernel.service
class interface(kernel.service.service):
def __init__(self, name):
self.name = name
<|reserved_special_token_1|>
# Jarvis interface class definition
import kernel.service
class interface(kernel.service.service):
def __init__(self, name):
self.name = name
|
flexible
|
{
"blob_id": "237f1f72ac3ef381f115a88025518f387825ff79",
"index": 9696,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass interface(kernel.service.service):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass interface(kernel.service.service):\n\n def __init__(self, name):\n self.name = name\n",
"step-4": "import kernel.service\n\n\nclass interface(kernel.service.service):\n\n def __init__(self, name):\n self.name = name\n",
"step-5": "# Jarvis interface class definition\nimport kernel.service\n\nclass interface(kernel.service.service):\n def __init__(self, name):\n self.name = name\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def Move(direction, delay=0.2):
PressKey(dk[config[direction]])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
def Action(direction, pull=None):
delay = 0.6
if pull:
delay = 1
PressKey(dk[config[pull]])
ReleaseKey(dk[config[pull]])
PressKey(dk[config['Grab']])
PressKey(dk[config[direction]])
else:
PressKey(dk[config[direction]])
PressKey(dk[config['Grab']])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
ReleaseKey(dk[config['Grab']])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Move(direction, delay=0.2):
PressKey(dk[config[direction]])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
def Action(direction, pull=None):
delay = 0.6
if pull:
delay = 1
PressKey(dk[config[pull]])
ReleaseKey(dk[config[pull]])
PressKey(dk[config['Grab']])
PressKey(dk[config[direction]])
else:
PressKey(dk[config[direction]])
PressKey(dk[config['Grab']])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
ReleaseKey(dk[config['Grab']])
<|reserved_special_token_0|>
def init(filePath):
data = json.load(open(filePath))
pushed_keys = {'Up': False, 'Down': False, 'Left': False, 'Right':
False, 'Grab': False}
if data['Style'] == 'Manual':
for c in data['Main']:
try:
if c in moveKeys:
Move(c)
elif c in climbKeys:
Move(c.split(' ')[1], delay=0.6)
elif c in turnKeys:
Move(c.split(' ')[1], delay=0.1)
elif c in pullKeys:
direction = c.split(' ')[1]
Action(direction, pull=inverseDirections[direction])
elif c in pushKeys:
Action(c.split(' ')[1])
else:
print(c + ' is not recognized as a command')
print(c)
except Exception as e:
print(e)
elif data['Style'] == 'Recorded':
print('Reading Recorded file')
total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']
start_time = round(time.time(), 2)
print('length of recording: ' + str(total_time))
while time.time() < start_time + total_time:
timer = round(time.time() - start_time, 2)
for c in data['Main']:
if timer > c['Start'] and timer < c['End'] and not pushed_keys[
c['State']]:
print('pressing key ' + c['State'])
PressKey(dk[config[c['State']]])
pushed_keys[c['State']] = True
elif timer == c['End'] and pushed_keys[c['State']]:
print('releasing ' + c['State'])
ReleaseKey(dk[config[c['State']]])
pushed_keys[c['State']] = False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
config = {'Up': 'W', 'Down': 'S', 'Left': 'A', 'Right': 'D', 'Grab':
'LBRACKET', 'Drop': 'RBRACKET'}
def Move(direction, delay=0.2):
PressKey(dk[config[direction]])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
def Action(direction, pull=None):
delay = 0.6
if pull:
delay = 1
PressKey(dk[config[pull]])
ReleaseKey(dk[config[pull]])
PressKey(dk[config['Grab']])
PressKey(dk[config[direction]])
else:
PressKey(dk[config[direction]])
PressKey(dk[config['Grab']])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
ReleaseKey(dk[config['Grab']])
moveKeys = ['Up', 'Down', 'Left', 'Right']
climbKeys = ['Climb Up', 'Climb Down', 'Climb Left', 'Climb Right']
turnKeys = ['Turn Up', 'Turn Down', 'Turn Left', 'Turn Right']
pullKeys = ['Pull Up', 'Pull Down', 'Pull Left', 'Pull Right']
pushKeys = ['Push Up', 'Push Down', 'Push Left', 'Push Right']
inverseDirections = {'Up': 'Down', 'Down': 'Up', 'Left': 'Right', 'Right':
'Left'}
def init(filePath):
data = json.load(open(filePath))
pushed_keys = {'Up': False, 'Down': False, 'Left': False, 'Right':
False, 'Grab': False}
if data['Style'] == 'Manual':
for c in data['Main']:
try:
if c in moveKeys:
Move(c)
elif c in climbKeys:
Move(c.split(' ')[1], delay=0.6)
elif c in turnKeys:
Move(c.split(' ')[1], delay=0.1)
elif c in pullKeys:
direction = c.split(' ')[1]
Action(direction, pull=inverseDirections[direction])
elif c in pushKeys:
Action(c.split(' ')[1])
else:
print(c + ' is not recognized as a command')
print(c)
except Exception as e:
print(e)
elif data['Style'] == 'Recorded':
print('Reading Recorded file')
total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']
start_time = round(time.time(), 2)
print('length of recording: ' + str(total_time))
while time.time() < start_time + total_time:
timer = round(time.time() - start_time, 2)
for c in data['Main']:
if timer > c['Start'] and timer < c['End'] and not pushed_keys[
c['State']]:
print('pressing key ' + c['State'])
PressKey(dk[config[c['State']]])
pushed_keys[c['State']] = True
elif timer == c['End'] and pushed_keys[c['State']]:
print('releasing ' + c['State'])
ReleaseKey(dk[config[c['State']]])
pushed_keys[c['State']] = False
<|reserved_special_token_1|>
import json
import time
from keySender import PressKey, ReleaseKey, dk
config = {'Up': 'W', 'Down': 'S', 'Left': 'A', 'Right': 'D', 'Grab':
'LBRACKET', 'Drop': 'RBRACKET'}
def Move(direction, delay=0.2):
PressKey(dk[config[direction]])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
def Action(direction, pull=None):
delay = 0.6
if pull:
delay = 1
PressKey(dk[config[pull]])
ReleaseKey(dk[config[pull]])
PressKey(dk[config['Grab']])
PressKey(dk[config[direction]])
else:
PressKey(dk[config[direction]])
PressKey(dk[config['Grab']])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
ReleaseKey(dk[config['Grab']])
moveKeys = ['Up', 'Down', 'Left', 'Right']
climbKeys = ['Climb Up', 'Climb Down', 'Climb Left', 'Climb Right']
turnKeys = ['Turn Up', 'Turn Down', 'Turn Left', 'Turn Right']
pullKeys = ['Pull Up', 'Pull Down', 'Pull Left', 'Pull Right']
pushKeys = ['Push Up', 'Push Down', 'Push Left', 'Push Right']
inverseDirections = {'Up': 'Down', 'Down': 'Up', 'Left': 'Right', 'Right':
'Left'}
def init(filePath):
data = json.load(open(filePath))
pushed_keys = {'Up': False, 'Down': False, 'Left': False, 'Right':
False, 'Grab': False}
if data['Style'] == 'Manual':
for c in data['Main']:
try:
if c in moveKeys:
Move(c)
elif c in climbKeys:
Move(c.split(' ')[1], delay=0.6)
elif c in turnKeys:
Move(c.split(' ')[1], delay=0.1)
elif c in pullKeys:
direction = c.split(' ')[1]
Action(direction, pull=inverseDirections[direction])
elif c in pushKeys:
Action(c.split(' ')[1])
else:
print(c + ' is not recognized as a command')
print(c)
except Exception as e:
print(e)
elif data['Style'] == 'Recorded':
print('Reading Recorded file')
total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']
start_time = round(time.time(), 2)
print('length of recording: ' + str(total_time))
while time.time() < start_time + total_time:
timer = round(time.time() - start_time, 2)
for c in data['Main']:
if timer > c['Start'] and timer < c['End'] and not pushed_keys[
c['State']]:
print('pressing key ' + c['State'])
PressKey(dk[config[c['State']]])
pushed_keys[c['State']] = True
elif timer == c['End'] and pushed_keys[c['State']]:
print('releasing ' + c['State'])
ReleaseKey(dk[config[c['State']]])
pushed_keys[c['State']] = False
<|reserved_special_token_1|>
import json
import time
from keySender import PressKey,ReleaseKey,dk
config = {
"Up": "W",
"Down": "S",
"Left": "A",
"Right": "D",
"Grab": "LBRACKET",
"Drop": "RBRACKET"
}
### Commands
# Move
def Move(direction,delay=.2):
PressKey(dk[config[direction]])
time.sleep(delay) # Replace with a better condition
ReleaseKey(dk[config[direction]])
# Push/Pull
def Action(direction,pull=None):
delay = .6
# If pulling - ensure you are grabbing the right block
# I.e. 'Pull Right' needs to face left first
if pull:
delay = 1
PressKey(dk[config[pull]])
ReleaseKey(dk[config[pull]])
PressKey(dk[config["Grab"]])
PressKey(dk[config[direction]])
else:
PressKey(dk[config[direction]])
PressKey(dk[config["Grab"]])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
ReleaseKey(dk[config["Grab"]])
# References for keywords in file
moveKeys = ["Up","Down","Left","Right"]
climbKeys = ["Climb Up", "Climb Down", "Climb Left", "Climb Right"]
turnKeys = ["Turn Up", "Turn Down", "Turn Left", "Turn Right"]
pullKeys = ["Pull Up", "Pull Down","Pull Left", "Pull Right"]
pushKeys = ["Push Up", "Push Down", "Push Left", "Push Right"]
# Simplify turning
inverseDirections = {
"Up": "Down",
"Down": "Up",
"Left": "Right",
"Right": "Left",
}
### Interpreter
def init(filePath):
data = json.load(open(filePath))
pushed_keys = {"Up": False, "Down": False, "Left": False, "Right": False, "Grab": False}
if data['Style'] == "Manual":
for c in data['Main']:
try:
if c in moveKeys:
Move(c)
elif c in climbKeys:
Move(c.split(" ")[1],delay=.6)
elif c in turnKeys:
Move(c.split(" ")[1],delay=.1)
elif c in pullKeys:
direction = c.split(" ")[1]
Action(direction,pull=inverseDirections[direction])
elif c in pushKeys:
Action(c.split(" ")[1])
else:
print(c+" is not recognized as a command")
print(c)
except Exception as e:
print(e)
elif data['Style'] == "Recorded":
print("Reading Recorded file")
total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']
start_time = round(time.time(),2)
print("length of recording: "+str(total_time))
while time.time() < start_time+total_time:
timer = round(time.time() - start_time,2)
for c in data['Main']:
if timer > c['Start'] and timer < c['End'] and not pushed_keys[c['State']]:
print("pressing key "+ c['State'])
PressKey(dk[config[c['State']]])
pushed_keys[c['State']] = True
elif timer == c['End'] and pushed_keys[c['State']]:
print("releasing "+c['State'])
ReleaseKey(dk[config[c['State']]])
pushed_keys[c['State']] = False
|
flexible
|
{
"blob_id": "1e7789b154271eb8407a027c6ddf6c941cc69a41",
"index": 3070,
"step-1": "<mask token>\n\n\ndef Move(direction, delay=0.2):\n PressKey(dk[config[direction]])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n\n\ndef Action(direction, pull=None):\n delay = 0.6\n if pull:\n delay = 1\n PressKey(dk[config[pull]])\n ReleaseKey(dk[config[pull]])\n PressKey(dk[config['Grab']])\n PressKey(dk[config[direction]])\n else:\n PressKey(dk[config[direction]])\n PressKey(dk[config['Grab']])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n ReleaseKey(dk[config['Grab']])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef Move(direction, delay=0.2):\n PressKey(dk[config[direction]])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n\n\ndef Action(direction, pull=None):\n delay = 0.6\n if pull:\n delay = 1\n PressKey(dk[config[pull]])\n ReleaseKey(dk[config[pull]])\n PressKey(dk[config['Grab']])\n PressKey(dk[config[direction]])\n else:\n PressKey(dk[config[direction]])\n PressKey(dk[config['Grab']])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n ReleaseKey(dk[config['Grab']])\n\n\n<mask token>\n\n\ndef init(filePath):\n data = json.load(open(filePath))\n pushed_keys = {'Up': False, 'Down': False, 'Left': False, 'Right': \n False, 'Grab': False}\n if data['Style'] == 'Manual':\n for c in data['Main']:\n try:\n if c in moveKeys:\n Move(c)\n elif c in climbKeys:\n Move(c.split(' ')[1], delay=0.6)\n elif c in turnKeys:\n Move(c.split(' ')[1], delay=0.1)\n elif c in pullKeys:\n direction = c.split(' ')[1]\n Action(direction, pull=inverseDirections[direction])\n elif c in pushKeys:\n Action(c.split(' ')[1])\n else:\n print(c + ' is not recognized as a command')\n print(c)\n except Exception as e:\n print(e)\n elif data['Style'] == 'Recorded':\n print('Reading Recorded file')\n total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']\n start_time = round(time.time(), 2)\n print('length of recording: ' + str(total_time))\n while time.time() < start_time + total_time:\n timer = round(time.time() - start_time, 2)\n for c in data['Main']:\n if timer > c['Start'] and timer < c['End'] and not pushed_keys[\n c['State']]:\n print('pressing key ' + c['State'])\n PressKey(dk[config[c['State']]])\n pushed_keys[c['State']] = True\n elif timer == c['End'] and pushed_keys[c['State']]:\n print('releasing ' + c['State'])\n ReleaseKey(dk[config[c['State']]])\n pushed_keys[c['State']] = False\n",
"step-3": "<mask token>\nconfig = {'Up': 'W', 'Down': 'S', 'Left': 'A', 'Right': 'D', 'Grab':\n 'LBRACKET', 'Drop': 'RBRACKET'}\n\n\ndef Move(direction, delay=0.2):\n PressKey(dk[config[direction]])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n\n\ndef Action(direction, pull=None):\n delay = 0.6\n if pull:\n delay = 1\n PressKey(dk[config[pull]])\n ReleaseKey(dk[config[pull]])\n PressKey(dk[config['Grab']])\n PressKey(dk[config[direction]])\n else:\n PressKey(dk[config[direction]])\n PressKey(dk[config['Grab']])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n ReleaseKey(dk[config['Grab']])\n\n\nmoveKeys = ['Up', 'Down', 'Left', 'Right']\nclimbKeys = ['Climb Up', 'Climb Down', 'Climb Left', 'Climb Right']\nturnKeys = ['Turn Up', 'Turn Down', 'Turn Left', 'Turn Right']\npullKeys = ['Pull Up', 'Pull Down', 'Pull Left', 'Pull Right']\npushKeys = ['Push Up', 'Push Down', 'Push Left', 'Push Right']\ninverseDirections = {'Up': 'Down', 'Down': 'Up', 'Left': 'Right', 'Right':\n 'Left'}\n\n\ndef init(filePath):\n data = json.load(open(filePath))\n pushed_keys = {'Up': False, 'Down': False, 'Left': False, 'Right': \n False, 'Grab': False}\n if data['Style'] == 'Manual':\n for c in data['Main']:\n try:\n if c in moveKeys:\n Move(c)\n elif c in climbKeys:\n Move(c.split(' ')[1], delay=0.6)\n elif c in turnKeys:\n Move(c.split(' ')[1], delay=0.1)\n elif c in pullKeys:\n direction = c.split(' ')[1]\n Action(direction, pull=inverseDirections[direction])\n elif c in pushKeys:\n Action(c.split(' ')[1])\n else:\n print(c + ' is not recognized as a command')\n print(c)\n except Exception as e:\n print(e)\n elif data['Style'] == 'Recorded':\n print('Reading Recorded file')\n total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']\n start_time = round(time.time(), 2)\n print('length of recording: ' + str(total_time))\n while time.time() < start_time + total_time:\n timer = round(time.time() - start_time, 2)\n for c in data['Main']:\n if timer > c['Start'] and timer < c['End'] and not pushed_keys[\n c['State']]:\n print('pressing key ' + c['State'])\n PressKey(dk[config[c['State']]])\n pushed_keys[c['State']] = True\n elif timer == c['End'] and pushed_keys[c['State']]:\n print('releasing ' + c['State'])\n ReleaseKey(dk[config[c['State']]])\n pushed_keys[c['State']] = False\n",
"step-4": "import json\nimport time\nfrom keySender import PressKey, ReleaseKey, dk\nconfig = {'Up': 'W', 'Down': 'S', 'Left': 'A', 'Right': 'D', 'Grab':\n 'LBRACKET', 'Drop': 'RBRACKET'}\n\n\ndef Move(direction, delay=0.2):\n PressKey(dk[config[direction]])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n\n\ndef Action(direction, pull=None):\n delay = 0.6\n if pull:\n delay = 1\n PressKey(dk[config[pull]])\n ReleaseKey(dk[config[pull]])\n PressKey(dk[config['Grab']])\n PressKey(dk[config[direction]])\n else:\n PressKey(dk[config[direction]])\n PressKey(dk[config['Grab']])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n ReleaseKey(dk[config['Grab']])\n\n\nmoveKeys = ['Up', 'Down', 'Left', 'Right']\nclimbKeys = ['Climb Up', 'Climb Down', 'Climb Left', 'Climb Right']\nturnKeys = ['Turn Up', 'Turn Down', 'Turn Left', 'Turn Right']\npullKeys = ['Pull Up', 'Pull Down', 'Pull Left', 'Pull Right']\npushKeys = ['Push Up', 'Push Down', 'Push Left', 'Push Right']\ninverseDirections = {'Up': 'Down', 'Down': 'Up', 'Left': 'Right', 'Right':\n 'Left'}\n\n\ndef init(filePath):\n data = json.load(open(filePath))\n pushed_keys = {'Up': False, 'Down': False, 'Left': False, 'Right': \n False, 'Grab': False}\n if data['Style'] == 'Manual':\n for c in data['Main']:\n try:\n if c in moveKeys:\n Move(c)\n elif c in climbKeys:\n Move(c.split(' ')[1], delay=0.6)\n elif c in turnKeys:\n Move(c.split(' ')[1], delay=0.1)\n elif c in pullKeys:\n direction = c.split(' ')[1]\n Action(direction, pull=inverseDirections[direction])\n elif c in pushKeys:\n Action(c.split(' ')[1])\n else:\n print(c + ' is not recognized as a command')\n print(c)\n except Exception as e:\n print(e)\n elif data['Style'] == 'Recorded':\n print('Reading Recorded file')\n total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']\n start_time = round(time.time(), 2)\n print('length of recording: ' + str(total_time))\n while time.time() < start_time + total_time:\n timer = round(time.time() - start_time, 2)\n for c in data['Main']:\n if timer > c['Start'] and timer < c['End'] and not pushed_keys[\n c['State']]:\n print('pressing key ' + c['State'])\n PressKey(dk[config[c['State']]])\n pushed_keys[c['State']] = True\n elif timer == c['End'] and pushed_keys[c['State']]:\n print('releasing ' + c['State'])\n ReleaseKey(dk[config[c['State']]])\n pushed_keys[c['State']] = False\n",
"step-5": "import json\nimport time\nfrom keySender import PressKey,ReleaseKey,dk\nconfig = {\n\t\"Up\": \"W\",\n\t\"Down\": \"S\",\n\t\"Left\": \"A\",\n\t\"Right\": \"D\",\n\t\"Grab\": \"LBRACKET\",\n\t\"Drop\": \"RBRACKET\"\n}\n\n### Commands\n# Move\ndef Move(direction,delay=.2):\n\tPressKey(dk[config[direction]])\n\ttime.sleep(delay) # Replace with a better condition\n\tReleaseKey(dk[config[direction]])\n\n# Push/Pull\ndef Action(direction,pull=None):\n\tdelay = .6\n\t# If pulling - ensure you are grabbing the right block\n\t# I.e. 'Pull Right' needs to face left first\n\tif pull:\n\t\tdelay = 1\n\t\tPressKey(dk[config[pull]])\n\t\tReleaseKey(dk[config[pull]])\n\t\tPressKey(dk[config[\"Grab\"]])\n\t\tPressKey(dk[config[direction]])\n\telse:\n\t\tPressKey(dk[config[direction]])\n\t\tPressKey(dk[config[\"Grab\"]])\n\ttime.sleep(delay)\n\tReleaseKey(dk[config[direction]])\n\tReleaseKey(dk[config[\"Grab\"]])\n\n# References for keywords in file\nmoveKeys = [\"Up\",\"Down\",\"Left\",\"Right\"]\nclimbKeys = [\"Climb Up\", \"Climb Down\", \"Climb Left\", \"Climb Right\"]\nturnKeys = [\"Turn Up\", \"Turn Down\", \"Turn Left\", \"Turn Right\"]\npullKeys = [\"Pull Up\", \"Pull Down\",\"Pull Left\", \"Pull Right\"]\npushKeys = [\"Push Up\", \"Push Down\", \"Push Left\", \"Push Right\"]\n\n# Simplify turning\ninverseDirections = {\n\t\"Up\": \"Down\",\n\t\"Down\": \"Up\",\n\t\"Left\": \"Right\",\n\t\"Right\": \"Left\",\n}\n\n### Interpreter\ndef init(filePath):\n\tdata = json.load(open(filePath))\n\tpushed_keys = {\"Up\": False, \"Down\": False, \"Left\": False, \"Right\": False, \"Grab\": False}\n\tif data['Style'] == \"Manual\":\n\t\tfor c in data['Main']:\n\t\t\ttry:\n\t\t\t\tif c in moveKeys:\n\t\t\t\t\tMove(c)\n\t\t\t\telif c in climbKeys:\n\t\t\t\t\tMove(c.split(\" \")[1],delay=.6)\n\t\t\t\telif c in turnKeys:\n\t\t\t\t\tMove(c.split(\" \")[1],delay=.1)\n\t\t\t\telif c in pullKeys:\n\t\t\t\t\tdirection = c.split(\" \")[1]\n\t\t\t\t\tAction(direction,pull=inverseDirections[direction])\n\t\t\t\telif c in pushKeys:\n\t\t\t\t\tAction(c.split(\" \")[1])\n\t\t\t\telse:\n\t\t\t\t\tprint(c+\" is not recognized as a command\")\n\t\t\t\tprint(c)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\n\telif data['Style'] == \"Recorded\":\n\t\tprint(\"Reading Recorded file\")\n\t\ttotal_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']\n\t\tstart_time = round(time.time(),2)\n\t\tprint(\"length of recording: \"+str(total_time))\n\t\twhile time.time() < start_time+total_time:\n\t\t\ttimer = round(time.time() - start_time,2)\n\t\t\tfor c in data['Main']:\n\t\t\t\tif timer > c['Start'] and timer < c['End'] and not pushed_keys[c['State']]:\n\t\t\t\t\tprint(\"pressing key \"+ c['State'])\n\t\t\t\t\tPressKey(dk[config[c['State']]])\n\t\t\t\t\tpushed_keys[c['State']] = True\n\t\t\t\telif timer == c['End'] and pushed_keys[c['State']]:\n\t\t\t\t\tprint(\"releasing \"+c['State'])\n\t\t\t\t\tReleaseKey(dk[config[c['State']]])\n\t\t\t\t\tpushed_keys[c['State']] = False",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pandas as pd
from greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df
def get_runs_counts_by_match():
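    """Return a match_code x runs pivot table counting how many deliveries produced each runs value in every match."""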
ipl_df = read_csv_data_to_df("data/ipl_dataset.csv")
df1 = pd.DataFrame(ipl_df[['match_code','runs','venue']])
df2 = df1.groupby(['match_code','runs'], as_index=False).count()
df = df2.pivot(index='match_code',columns='runs')
df = df.fillna(0)
df = df.astype('int')
return df
get_runs_counts_by_match()
|
normal
|
{
"blob_id": "4f06d87ec79c20206ff45ba72ab77844076be553",
"index": 9707,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')\n df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])\n df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()\n df = df2.pivot(index='match_code', columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')\n df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])\n df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()\n df = df2.pivot(index='match_code', columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\n\nget_runs_counts_by_match()\n",
"step-4": "import pandas as pd\nfrom greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df\n\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df('data/ipl_dataset.csv')\n df1 = pd.DataFrame(ipl_df[['match_code', 'runs', 'venue']])\n df2 = df1.groupby(['match_code', 'runs'], as_index=False).count()\n df = df2.pivot(index='match_code', columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\n\nget_runs_counts_by_match()\n",
"step-5": "\nimport pandas as pd\nfrom greyatomlib.pandas_project.q01_read_csv_data_to_df.build import read_csv_data_to_df\n\ndef get_runs_counts_by_match():\n ipl_df = read_csv_data_to_df(\"data/ipl_dataset.csv\")\n df1 = pd.DataFrame(ipl_df[['match_code','runs','venue']])\n df2 = df1.groupby(['match_code','runs'], as_index=False).count()\n df = df2.pivot(index='match_code',columns='runs')\n df = df.fillna(0)\n df = df.astype('int')\n return df\n\nget_runs_counts_by_match()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#---------------------------------------------
# File name: phase2app.py
# Description: Launches GUI for Twitter User Timeline Sentiment Analysis program
# Author: Gilbert Yap ([email protected])
# Date: October 03, 2020
#---------------------------------------------
from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout, QMessageBox
from PySide2.QtCore import Qt, QFile, QRegExp
from PySide2.QtGui import QRegExpValidator
from phase2GUI import Ui_Dialog
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import configparser, csv, datetime, sys
sys.path.insert(1, '..\\SharedFiles\\')
import matplotlib.pyplot as plt
import helper, phase2Functions
SETTINGS_FILE = '..\\SharedFiles\\settings.ini'
class Ui_Window(QDialog):
def __init__(self):
super(Ui_Window, self).__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
# Set regex validator for the username
regex = QRegExp("\w+")
validator = QRegExpValidator(regex)
self.ui.usernameLineEdit.setValidator(validator)
# Set the end date to today by default
self.ui.endMonthSpinBox.setValue(datetime.datetime.now().month)
self.ui.endDaySpinBox.setValue(datetime.datetime.now().day)
self.ui.endYearSpinBox.setValue(datetime.datetime.now().year)
# Place a plot inside of plotDisplayGroupBox
self.figure = plt.figure()
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas, self)
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
self.ui.plotDisplayGroupBox.setLayout(layout)
# Set up signals
self.ui.processDatesPushButton.clicked.connect(self.plotSentiment)
self.ui.exportPushButton.clicked.connect(self.exportValues)
# Init APIs
settings = configparser.ConfigParser()
settings.read(SETTINGS_FILE)
helper.print_with_stars('Initializing APIs')
(twitterApi, googleClient, errors) = phase2Functions.init_apis(settings['KEYS']['api_key'], settings['KEYS']['api_secret_key'])
if(len(errors) > 0):
self.printMessages(errors)
sys.exit(1)
else:
self.twitterApi = twitterApi
self.googleClient = googleClient
self.show()
'''
Plot the sentiment score
Input - self:Ui_Window
Output - None
'''
def plotSentiment(self):
QApplication.setOverrideCursor(Qt.WaitCursor)
# Get the sentiment data
startDate = self.get_start_date()
endDate = self.get_end_date()
if (startDate is None) or (endDate is None):
return
(dateList, scoreList, magnitudeList, tweetList, errors) = phase2Functions.generate_data_lists(self.twitterApi, self.googleClient, self.get_username(), startDate, endDate)
QApplication.restoreOverrideCursor()
# If there were any errors, print them out
if(len(errors) > 0):
self.printMessages(errors)
else:
# If there are no errors, format and plot out the data
self.plotData = (dateList, scoreList, magnitudeList)
self.tweetList = tweetList
self.figure.clear()
ax = self.figure.add_subplot(111)
self.figure.subplots_adjust(top=0.88,
bottom=0.255,
left=0.17,
right=0.9,
hspace=0.2,
wspace=0.2)
ax.set_title("Sentiment Analysis of @{}'s tweets".format(self.get_username(),))
ax.set_xlabel("Date")
ax.set_ylabel("Sentiment Value")
ax.xaxis.set_major_locator(plt.MaxNLocator(10))
for tick in ax.get_xticklabels():
tick.set_rotation(45)
ax.plot(self.plotData[0],self.plotData[1],"-bo",label='Sentiment Score')
ax.plot(self.plotData[0],self.plotData[2], "-ro",label='Sentiment Magnitude')
ax.legend(loc="lower right")
self.canvas.draw()
self.enableExport()
'''
Gets username from text field
Input - self:Ui_Window
Output - string
'''
def get_username(self):
return (self.ui.usernameLineEdit.text())
'''
Gets start date from spin boxes
Input - self:Ui_Window
Output - datetime.datetime
'''
def get_start_date(self):
start_month = self.ui.startMonthSpinBox.value()
start_day = self.ui.startDaySpinBox.value()
start_year = self.ui.startYearSpinBox.value()
try:
startDate = datetime.datetime(start_year, start_month,start_day)
except:
self.printMessages(['Start date is improperly set. Check to see that the date is correct/exists.'])
return None
return startDate
'''
Gets end date from spin boxes
Input - self:Ui_Window
Output - datetime.datetime
'''
def get_end_date(self):
end_month = self.ui.endMonthSpinBox.value()
end_day = self.ui.endDaySpinBox.value()
end_year = self.ui.endYearSpinBox.value()
try:
endDate = datetime.datetime(end_year, end_month,end_day)
except:
self.printMessages(['End date is improperly set. Check to see that the date is correct/exists.'])
return None
return endDate
'''
Toggles the export button.
Input - self:Ui_Window
Output - None
'''
def enableExport(self):
self.ui.exportPushButton.setEnabled(True)
'''
    Exports date, score/magnitude, and tweet text to csv and pops up a window when done
Input - self:Ui_Window
Output - None
'''
def exportValues(self):
currentTimeDate = datetime.datetime.now()
currentTimeDate = str(currentTimeDate.year)+'-'+str(currentTimeDate.month)+'-'+str(currentTimeDate.day)+'-'+str(currentTimeDate.hour)+'-'+str(currentTimeDate.minute)+'-'+str(currentTimeDate.second)
with open(currentTimeDate+'_'+self.get_username()+'_score.csv', mode='w') as score_file:
writer = csv.writer(score_file)
for i in range(len(self.plotData[0])):
writer.writerow( [ str(self.plotData[0][i]), self.plotData[1][i],
self.tweetList[i].full_text.encode(encoding='UTF-8', errors='replace') ] )
with open(currentTimeDate+'_'+self.get_username()+'_magnitude.csv', mode='w') as magnitude_file:
writer = csv.writer(magnitude_file)
for i in range(len(self.plotData[0])):
writer.writerow( [ str(self.plotData[0][i]), self.plotData[2][i],
self.tweetList[i].full_text.encode(encoding='UTF-8', errors='replace') ] )
msgBox = QMessageBox()
msgBox.setText('CSV files exported!')
msgBox.exec()
'''
Prints out messages in a pop up window
Input - self:Ui_Window
Output - None
'''
def printMessages(self, messageList):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Critical)
        msgBox.setWindowTitle('Errors occurred!')
tempString = ''
for message in messageList:
tempString += (message + '\n')
msgBox.setText(tempString)
msgBox.exec()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Ui_Window()
window.show()
sys.exit(app.exec_())
|
normal
|
{
"blob_id": "8cabacb64f3b193b957c61d6e1ca21f2046e52d1",
"index": 8199,
"step-1": "<mask token>\n\n\nclass Ui_Window(QDialog):\n\n def __init__(self):\n super(Ui_Window, self).__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n regex = QRegExp('\\\\w+')\n validator = QRegExpValidator(regex)\n self.ui.usernameLineEdit.setValidator(validator)\n self.ui.endMonthSpinBox.setValue(datetime.datetime.now().month)\n self.ui.endDaySpinBox.setValue(datetime.datetime.now().day)\n self.ui.endYearSpinBox.setValue(datetime.datetime.now().year)\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar(self.canvas, self)\n layout = QVBoxLayout()\n layout.addWidget(self.toolbar)\n layout.addWidget(self.canvas)\n self.ui.plotDisplayGroupBox.setLayout(layout)\n self.ui.processDatesPushButton.clicked.connect(self.plotSentiment)\n self.ui.exportPushButton.clicked.connect(self.exportValues)\n settings = configparser.ConfigParser()\n settings.read(SETTINGS_FILE)\n helper.print_with_stars('Initializing APIs')\n twitterApi, googleClient, errors = phase2Functions.init_apis(settings\n ['KEYS']['api_key'], settings['KEYS']['api_secret_key'])\n if len(errors) > 0:\n self.printMessages(errors)\n sys.exit(1)\n else:\n self.twitterApi = twitterApi\n self.googleClient = googleClient\n self.show()\n <mask token>\n\n def plotSentiment(self):\n QApplication.setOverrideCursor(Qt.WaitCursor)\n startDate = self.get_start_date()\n endDate = self.get_end_date()\n if startDate is None or endDate is None:\n return\n dateList, scoreList, magnitudeList, tweetList, errors = (\n phase2Functions.generate_data_lists(self.twitterApi, self.\n googleClient, self.get_username(), startDate, endDate))\n QApplication.restoreOverrideCursor()\n if len(errors) > 0:\n self.printMessages(errors)\n else:\n self.plotData = dateList, scoreList, magnitudeList\n self.tweetList = tweetList\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.88, bottom=0.255, left=0.17,\n right=0.9, hspace=0.2, wspace=0.2)\n ax.set_title(\"Sentiment Analysis of @{}'s tweets\".format(self.\n get_username()))\n ax.set_xlabel('Date')\n ax.set_ylabel('Sentiment Value')\n ax.xaxis.set_major_locator(plt.MaxNLocator(10))\n for tick in ax.get_xticklabels():\n tick.set_rotation(45)\n ax.plot(self.plotData[0], self.plotData[1], '-bo', label=\n 'Sentiment Score')\n ax.plot(self.plotData[0], self.plotData[2], '-ro', label=\n 'Sentiment Magnitude')\n ax.legend(loc='lower right')\n self.canvas.draw()\n self.enableExport()\n <mask token>\n\n def get_username(self):\n return self.ui.usernameLineEdit.text()\n <mask token>\n\n def get_start_date(self):\n start_month = self.ui.startMonthSpinBox.value()\n start_day = self.ui.startDaySpinBox.value()\n start_year = self.ui.startYearSpinBox.value()\n try:\n startDate = datetime.datetime(start_year, start_month, start_day)\n except:\n self.printMessages([\n 'Start date is improperly set. Check to see that the date is correct/exists.'\n ])\n return None\n return startDate\n <mask token>\n\n def get_end_date(self):\n end_month = self.ui.endMonthSpinBox.value()\n end_day = self.ui.endDaySpinBox.value()\n end_year = self.ui.endYearSpinBox.value()\n try:\n endDate = datetime.datetime(end_year, end_month, end_day)\n except:\n self.printMessages([\n 'End date is improperly set. 
Check to see that the date is correct/exists.'\n ])\n return None\n return endDate\n <mask token>\n\n def enableExport(self):\n self.ui.exportPushButton.setEnabled(True)\n <mask token>\n\n def exportValues(self):\n currentTimeDate = datetime.datetime.now()\n currentTimeDate = str(currentTimeDate.year) + '-' + str(currentTimeDate\n .month) + '-' + str(currentTimeDate.day) + '-' + str(\n currentTimeDate.hour) + '-' + str(currentTimeDate.minute\n ) + '-' + str(currentTimeDate.second)\n with open(currentTimeDate + '_' + self.get_username() +\n '_score.csv', mode='w') as score_file:\n writer = csv.writer(score_file)\n for i in range(len(self.plotData[0])):\n writer.writerow([str(self.plotData[0][i]), self.plotData[1]\n [i], self.tweetList[i].full_text.encode(encoding=\n 'UTF-8', errors='replace')])\n with open(currentTimeDate + '_' + self.get_username() +\n '_magnitude.csv', mode='w') as magnitude_file:\n writer = csv.writer(magnitude_file)\n for i in range(len(self.plotData[0])):\n writer.writerow([str(self.plotData[0][i]), self.plotData[2]\n [i], self.tweetList[i].full_text.encode(encoding=\n 'UTF-8', errors='replace')])\n msgBox = QMessageBox()\n msgBox.setText('CSV files exported!')\n msgBox.exec()\n <mask token>\n\n def printMessages(self, messageList):\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.setWindowTitle('Errors occured!')\n tempString = ''\n for message in messageList:\n tempString += message + '\\n'\n msgBox.setText(tempString)\n msgBox.exec()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(1, '..\\\\SharedFiles\\\\')\n<mask token>\n\n\nclass Ui_Window(QDialog):\n\n def __init__(self):\n super(Ui_Window, self).__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n regex = QRegExp('\\\\w+')\n validator = QRegExpValidator(regex)\n self.ui.usernameLineEdit.setValidator(validator)\n self.ui.endMonthSpinBox.setValue(datetime.datetime.now().month)\n self.ui.endDaySpinBox.setValue(datetime.datetime.now().day)\n self.ui.endYearSpinBox.setValue(datetime.datetime.now().year)\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar(self.canvas, self)\n layout = QVBoxLayout()\n layout.addWidget(self.toolbar)\n layout.addWidget(self.canvas)\n self.ui.plotDisplayGroupBox.setLayout(layout)\n self.ui.processDatesPushButton.clicked.connect(self.plotSentiment)\n self.ui.exportPushButton.clicked.connect(self.exportValues)\n settings = configparser.ConfigParser()\n settings.read(SETTINGS_FILE)\n helper.print_with_stars('Initializing APIs')\n twitterApi, googleClient, errors = phase2Functions.init_apis(settings\n ['KEYS']['api_key'], settings['KEYS']['api_secret_key'])\n if len(errors) > 0:\n self.printMessages(errors)\n sys.exit(1)\n else:\n self.twitterApi = twitterApi\n self.googleClient = googleClient\n self.show()\n \"\"\"\n Plot the sentiment score\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def plotSentiment(self):\n QApplication.setOverrideCursor(Qt.WaitCursor)\n startDate = self.get_start_date()\n endDate = self.get_end_date()\n if startDate is None or endDate is None:\n return\n dateList, scoreList, magnitudeList, tweetList, errors = (\n phase2Functions.generate_data_lists(self.twitterApi, self.\n googleClient, self.get_username(), startDate, endDate))\n QApplication.restoreOverrideCursor()\n if len(errors) > 0:\n self.printMessages(errors)\n else:\n self.plotData = dateList, scoreList, magnitudeList\n self.tweetList = tweetList\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.88, bottom=0.255, left=0.17,\n right=0.9, hspace=0.2, wspace=0.2)\n ax.set_title(\"Sentiment Analysis of @{}'s tweets\".format(self.\n get_username()))\n ax.set_xlabel('Date')\n ax.set_ylabel('Sentiment Value')\n ax.xaxis.set_major_locator(plt.MaxNLocator(10))\n for tick in ax.get_xticklabels():\n tick.set_rotation(45)\n ax.plot(self.plotData[0], self.plotData[1], '-bo', label=\n 'Sentiment Score')\n ax.plot(self.plotData[0], self.plotData[2], '-ro', label=\n 'Sentiment Magnitude')\n ax.legend(loc='lower right')\n self.canvas.draw()\n self.enableExport()\n \"\"\"\n Gets username from text field\n Input - self:Ui_Window\n Output - string\n \"\"\"\n\n def get_username(self):\n return self.ui.usernameLineEdit.text()\n \"\"\"\n Gets start date from spin boxes\n Input - self:Ui_Window\n Output - datetime.datetime\n \"\"\"\n\n def get_start_date(self):\n start_month = self.ui.startMonthSpinBox.value()\n start_day = self.ui.startDaySpinBox.value()\n start_year = self.ui.startYearSpinBox.value()\n try:\n startDate = datetime.datetime(start_year, start_month, start_day)\n except:\n self.printMessages([\n 'Start date is improperly set. 
Check to see that the date is correct/exists.'\n ])\n return None\n return startDate\n \"\"\"\n Gets end date from spin boxes\n Input - self:Ui_Window\n Output - datetime.datetime\n \"\"\"\n\n def get_end_date(self):\n end_month = self.ui.endMonthSpinBox.value()\n end_day = self.ui.endDaySpinBox.value()\n end_year = self.ui.endYearSpinBox.value()\n try:\n endDate = datetime.datetime(end_year, end_month, end_day)\n except:\n self.printMessages([\n 'End date is improperly set. Check to see that the date is correct/exists.'\n ])\n return None\n return endDate\n \"\"\"\n Toggles the export button.\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def enableExport(self):\n self.ui.exportPushButton.setEnabled(True)\n \"\"\"\n Exports date, score/magntitude, and tweet text to csv and pops up a window when done\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def exportValues(self):\n currentTimeDate = datetime.datetime.now()\n currentTimeDate = str(currentTimeDate.year) + '-' + str(currentTimeDate\n .month) + '-' + str(currentTimeDate.day) + '-' + str(\n currentTimeDate.hour) + '-' + str(currentTimeDate.minute\n ) + '-' + str(currentTimeDate.second)\n with open(currentTimeDate + '_' + self.get_username() +\n '_score.csv', mode='w') as score_file:\n writer = csv.writer(score_file)\n for i in range(len(self.plotData[0])):\n writer.writerow([str(self.plotData[0][i]), self.plotData[1]\n [i], self.tweetList[i].full_text.encode(encoding=\n 'UTF-8', errors='replace')])\n with open(currentTimeDate + '_' + self.get_username() +\n '_magnitude.csv', mode='w') as magnitude_file:\n writer = csv.writer(magnitude_file)\n for i in range(len(self.plotData[0])):\n writer.writerow([str(self.plotData[0][i]), self.plotData[2]\n [i], self.tweetList[i].full_text.encode(encoding=\n 'UTF-8', errors='replace')])\n msgBox = QMessageBox()\n msgBox.setText('CSV files exported!')\n msgBox.exec()\n \"\"\"\n Prints out messages in a pop up window\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def printMessages(self, messageList):\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.setWindowTitle('Errors occured!')\n tempString = ''\n for message in messageList:\n tempString += message + '\\n'\n msgBox.setText(tempString)\n msgBox.exec()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = Ui_Window()\n window.show()\n sys.exit(app.exec_())\n",
"step-3": "<mask token>\nsys.path.insert(1, '..\\\\SharedFiles\\\\')\n<mask token>\nSETTINGS_FILE = '..\\\\SharedFiles\\\\settings.ini'\n\n\nclass Ui_Window(QDialog):\n\n def __init__(self):\n super(Ui_Window, self).__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n regex = QRegExp('\\\\w+')\n validator = QRegExpValidator(regex)\n self.ui.usernameLineEdit.setValidator(validator)\n self.ui.endMonthSpinBox.setValue(datetime.datetime.now().month)\n self.ui.endDaySpinBox.setValue(datetime.datetime.now().day)\n self.ui.endYearSpinBox.setValue(datetime.datetime.now().year)\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar(self.canvas, self)\n layout = QVBoxLayout()\n layout.addWidget(self.toolbar)\n layout.addWidget(self.canvas)\n self.ui.plotDisplayGroupBox.setLayout(layout)\n self.ui.processDatesPushButton.clicked.connect(self.plotSentiment)\n self.ui.exportPushButton.clicked.connect(self.exportValues)\n settings = configparser.ConfigParser()\n settings.read(SETTINGS_FILE)\n helper.print_with_stars('Initializing APIs')\n twitterApi, googleClient, errors = phase2Functions.init_apis(settings\n ['KEYS']['api_key'], settings['KEYS']['api_secret_key'])\n if len(errors) > 0:\n self.printMessages(errors)\n sys.exit(1)\n else:\n self.twitterApi = twitterApi\n self.googleClient = googleClient\n self.show()\n \"\"\"\n Plot the sentiment score\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def plotSentiment(self):\n QApplication.setOverrideCursor(Qt.WaitCursor)\n startDate = self.get_start_date()\n endDate = self.get_end_date()\n if startDate is None or endDate is None:\n return\n dateList, scoreList, magnitudeList, tweetList, errors = (\n phase2Functions.generate_data_lists(self.twitterApi, self.\n googleClient, self.get_username(), startDate, endDate))\n QApplication.restoreOverrideCursor()\n if len(errors) > 0:\n self.printMessages(errors)\n else:\n self.plotData = dateList, scoreList, magnitudeList\n self.tweetList = tweetList\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.88, bottom=0.255, left=0.17,\n right=0.9, hspace=0.2, wspace=0.2)\n ax.set_title(\"Sentiment Analysis of @{}'s tweets\".format(self.\n get_username()))\n ax.set_xlabel('Date')\n ax.set_ylabel('Sentiment Value')\n ax.xaxis.set_major_locator(plt.MaxNLocator(10))\n for tick in ax.get_xticklabels():\n tick.set_rotation(45)\n ax.plot(self.plotData[0], self.plotData[1], '-bo', label=\n 'Sentiment Score')\n ax.plot(self.plotData[0], self.plotData[2], '-ro', label=\n 'Sentiment Magnitude')\n ax.legend(loc='lower right')\n self.canvas.draw()\n self.enableExport()\n \"\"\"\n Gets username from text field\n Input - self:Ui_Window\n Output - string\n \"\"\"\n\n def get_username(self):\n return self.ui.usernameLineEdit.text()\n \"\"\"\n Gets start date from spin boxes\n Input - self:Ui_Window\n Output - datetime.datetime\n \"\"\"\n\n def get_start_date(self):\n start_month = self.ui.startMonthSpinBox.value()\n start_day = self.ui.startDaySpinBox.value()\n start_year = self.ui.startYearSpinBox.value()\n try:\n startDate = datetime.datetime(start_year, start_month, start_day)\n except:\n self.printMessages([\n 'Start date is improperly set. 
Check to see that the date is correct/exists.'\n ])\n return None\n return startDate\n \"\"\"\n Gets end date from spin boxes\n Input - self:Ui_Window\n Output - datetime.datetime\n \"\"\"\n\n def get_end_date(self):\n end_month = self.ui.endMonthSpinBox.value()\n end_day = self.ui.endDaySpinBox.value()\n end_year = self.ui.endYearSpinBox.value()\n try:\n endDate = datetime.datetime(end_year, end_month, end_day)\n except:\n self.printMessages([\n 'End date is improperly set. Check to see that the date is correct/exists.'\n ])\n return None\n return endDate\n \"\"\"\n Toggles the export button.\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def enableExport(self):\n self.ui.exportPushButton.setEnabled(True)\n \"\"\"\n Exports date, score/magntitude, and tweet text to csv and pops up a window when done\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def exportValues(self):\n currentTimeDate = datetime.datetime.now()\n currentTimeDate = str(currentTimeDate.year) + '-' + str(currentTimeDate\n .month) + '-' + str(currentTimeDate.day) + '-' + str(\n currentTimeDate.hour) + '-' + str(currentTimeDate.minute\n ) + '-' + str(currentTimeDate.second)\n with open(currentTimeDate + '_' + self.get_username() +\n '_score.csv', mode='w') as score_file:\n writer = csv.writer(score_file)\n for i in range(len(self.plotData[0])):\n writer.writerow([str(self.plotData[0][i]), self.plotData[1]\n [i], self.tweetList[i].full_text.encode(encoding=\n 'UTF-8', errors='replace')])\n with open(currentTimeDate + '_' + self.get_username() +\n '_magnitude.csv', mode='w') as magnitude_file:\n writer = csv.writer(magnitude_file)\n for i in range(len(self.plotData[0])):\n writer.writerow([str(self.plotData[0][i]), self.plotData[2]\n [i], self.tweetList[i].full_text.encode(encoding=\n 'UTF-8', errors='replace')])\n msgBox = QMessageBox()\n msgBox.setText('CSV files exported!')\n msgBox.exec()\n \"\"\"\n Prints out messages in a pop up window\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def printMessages(self, messageList):\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.setWindowTitle('Errors occured!')\n tempString = ''\n for message in messageList:\n tempString += message + '\\n'\n msgBox.setText(tempString)\n msgBox.exec()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = Ui_Window()\n window.show()\n sys.exit(app.exec_())\n",
"step-4": "from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout, QMessageBox\nfrom PySide2.QtCore import Qt, QFile, QRegExp\nfrom PySide2.QtGui import QRegExpValidator\nfrom phase2GUI import Ui_Dialog\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport configparser, csv, datetime, sys\nsys.path.insert(1, '..\\\\SharedFiles\\\\')\nimport matplotlib.pyplot as plt\nimport helper, phase2Functions\nSETTINGS_FILE = '..\\\\SharedFiles\\\\settings.ini'\n\n\nclass Ui_Window(QDialog):\n\n def __init__(self):\n super(Ui_Window, self).__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n regex = QRegExp('\\\\w+')\n validator = QRegExpValidator(regex)\n self.ui.usernameLineEdit.setValidator(validator)\n self.ui.endMonthSpinBox.setValue(datetime.datetime.now().month)\n self.ui.endDaySpinBox.setValue(datetime.datetime.now().day)\n self.ui.endYearSpinBox.setValue(datetime.datetime.now().year)\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar(self.canvas, self)\n layout = QVBoxLayout()\n layout.addWidget(self.toolbar)\n layout.addWidget(self.canvas)\n self.ui.plotDisplayGroupBox.setLayout(layout)\n self.ui.processDatesPushButton.clicked.connect(self.plotSentiment)\n self.ui.exportPushButton.clicked.connect(self.exportValues)\n settings = configparser.ConfigParser()\n settings.read(SETTINGS_FILE)\n helper.print_with_stars('Initializing APIs')\n twitterApi, googleClient, errors = phase2Functions.init_apis(settings\n ['KEYS']['api_key'], settings['KEYS']['api_secret_key'])\n if len(errors) > 0:\n self.printMessages(errors)\n sys.exit(1)\n else:\n self.twitterApi = twitterApi\n self.googleClient = googleClient\n self.show()\n \"\"\"\n Plot the sentiment score\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def plotSentiment(self):\n QApplication.setOverrideCursor(Qt.WaitCursor)\n startDate = self.get_start_date()\n endDate = self.get_end_date()\n if startDate is None or endDate is None:\n return\n dateList, scoreList, magnitudeList, tweetList, errors = (\n phase2Functions.generate_data_lists(self.twitterApi, self.\n googleClient, self.get_username(), startDate, endDate))\n QApplication.restoreOverrideCursor()\n if len(errors) > 0:\n self.printMessages(errors)\n else:\n self.plotData = dateList, scoreList, magnitudeList\n self.tweetList = tweetList\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.88, bottom=0.255, left=0.17,\n right=0.9, hspace=0.2, wspace=0.2)\n ax.set_title(\"Sentiment Analysis of @{}'s tweets\".format(self.\n get_username()))\n ax.set_xlabel('Date')\n ax.set_ylabel('Sentiment Value')\n ax.xaxis.set_major_locator(plt.MaxNLocator(10))\n for tick in ax.get_xticklabels():\n tick.set_rotation(45)\n ax.plot(self.plotData[0], self.plotData[1], '-bo', label=\n 'Sentiment Score')\n ax.plot(self.plotData[0], self.plotData[2], '-ro', label=\n 'Sentiment Magnitude')\n ax.legend(loc='lower right')\n self.canvas.draw()\n self.enableExport()\n \"\"\"\n Gets username from text field\n Input - self:Ui_Window\n Output - string\n \"\"\"\n\n def get_username(self):\n return self.ui.usernameLineEdit.text()\n \"\"\"\n Gets start date from spin boxes\n Input - self:Ui_Window\n Output - datetime.datetime\n \"\"\"\n\n def get_start_date(self):\n start_month = self.ui.startMonthSpinBox.value()\n start_day = self.ui.startDaySpinBox.value()\n start_year = 
self.ui.startYearSpinBox.value()\n try:\n startDate = datetime.datetime(start_year, start_month, start_day)\n except:\n self.printMessages([\n 'Start date is improperly set. Check to see that the date is correct/exists.'\n ])\n return None\n return startDate\n \"\"\"\n Gets end date from spin boxes\n Input - self:Ui_Window\n Output - datetime.datetime\n \"\"\"\n\n def get_end_date(self):\n end_month = self.ui.endMonthSpinBox.value()\n end_day = self.ui.endDaySpinBox.value()\n end_year = self.ui.endYearSpinBox.value()\n try:\n endDate = datetime.datetime(end_year, end_month, end_day)\n except:\n self.printMessages([\n 'End date is improperly set. Check to see that the date is correct/exists.'\n ])\n return None\n return endDate\n \"\"\"\n Toggles the export button.\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def enableExport(self):\n self.ui.exportPushButton.setEnabled(True)\n \"\"\"\n Exports date, score/magntitude, and tweet text to csv and pops up a window when done\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def exportValues(self):\n currentTimeDate = datetime.datetime.now()\n currentTimeDate = str(currentTimeDate.year) + '-' + str(currentTimeDate\n .month) + '-' + str(currentTimeDate.day) + '-' + str(\n currentTimeDate.hour) + '-' + str(currentTimeDate.minute\n ) + '-' + str(currentTimeDate.second)\n with open(currentTimeDate + '_' + self.get_username() +\n '_score.csv', mode='w') as score_file:\n writer = csv.writer(score_file)\n for i in range(len(self.plotData[0])):\n writer.writerow([str(self.plotData[0][i]), self.plotData[1]\n [i], self.tweetList[i].full_text.encode(encoding=\n 'UTF-8', errors='replace')])\n with open(currentTimeDate + '_' + self.get_username() +\n '_magnitude.csv', mode='w') as magnitude_file:\n writer = csv.writer(magnitude_file)\n for i in range(len(self.plotData[0])):\n writer.writerow([str(self.plotData[0][i]), self.plotData[2]\n [i], self.tweetList[i].full_text.encode(encoding=\n 'UTF-8', errors='replace')])\n msgBox = QMessageBox()\n msgBox.setText('CSV files exported!')\n msgBox.exec()\n \"\"\"\n Prints out messages in a pop up window\n Input - self:Ui_Window\n Output - None\n \"\"\"\n\n def printMessages(self, messageList):\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.setWindowTitle('Errors occured!')\n tempString = ''\n for message in messageList:\n tempString += message + '\\n'\n msgBox.setText(tempString)\n msgBox.exec()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = Ui_Window()\n window.show()\n sys.exit(app.exec_())\n",
"step-5": "#---------------------------------------------\n# File name: phase2app.py\n# Description: Launches GUI for Twitter User Timeline Sentiment Analysis program\n# Author: Gilbert Yap ([email protected])\n# Date: October 03, 2020\n#---------------------------------------------\n\nfrom PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout, QMessageBox\nfrom PySide2.QtCore import Qt, QFile, QRegExp\nfrom PySide2.QtGui import QRegExpValidator\nfrom phase2GUI import Ui_Dialog\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\n\nimport configparser, csv, datetime, sys\nsys.path.insert(1, '..\\\\SharedFiles\\\\')\nimport matplotlib.pyplot as plt\nimport helper, phase2Functions\n\nSETTINGS_FILE = '..\\\\SharedFiles\\\\settings.ini'\n\nclass Ui_Window(QDialog):\n def __init__(self):\n super(Ui_Window, self).__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n\n # Set regex validator for the username\n regex = QRegExp(\"\\w+\")\n validator = QRegExpValidator(regex)\n self.ui.usernameLineEdit.setValidator(validator)\n\n # Set the end date to today by default\n self.ui.endMonthSpinBox.setValue(datetime.datetime.now().month)\n self.ui.endDaySpinBox.setValue(datetime.datetime.now().day)\n self.ui.endYearSpinBox.setValue(datetime.datetime.now().year)\n \n # Place a plot inside of plotDisplayGroupBox\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar(self.canvas, self)\n layout = QVBoxLayout()\n layout.addWidget(self.toolbar)\n layout.addWidget(self.canvas)\n self.ui.plotDisplayGroupBox.setLayout(layout)\n\n # Set up signals\n self.ui.processDatesPushButton.clicked.connect(self.plotSentiment)\n self.ui.exportPushButton.clicked.connect(self.exportValues)\n\n # Init APIs\n settings = configparser.ConfigParser()\n settings.read(SETTINGS_FILE)\n\n helper.print_with_stars('Initializing APIs')\n (twitterApi, googleClient, errors) = phase2Functions.init_apis(settings['KEYS']['api_key'], settings['KEYS']['api_secret_key'])\n\n if(len(errors) > 0):\n self.printMessages(errors)\n sys.exit(1)\n else:\n self.twitterApi = twitterApi\n self.googleClient = googleClient\n self.show()\n\n '''\n Plot the sentiment score\n Input - self:Ui_Window\n Output - None\n '''\n def plotSentiment(self):\n QApplication.setOverrideCursor(Qt.WaitCursor)\n # Get the sentiment data\n startDate = self.get_start_date()\n endDate = self.get_end_date()\n \n if (startDate is None) or (endDate is None):\n return\n \n (dateList, scoreList, magnitudeList, tweetList, errors) = phase2Functions.generate_data_lists(self.twitterApi, self.googleClient, self.get_username(), startDate, endDate)\n QApplication.restoreOverrideCursor()\n \n # If there were any errors, print them out\n if(len(errors) > 0):\n self.printMessages(errors)\n else:\n # If there are no errors, format and plot out the data\n self.plotData = (dateList, scoreList, magnitudeList)\n self.tweetList = tweetList\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=0.88,\n bottom=0.255,\n left=0.17,\n right=0.9,\n hspace=0.2,\n wspace=0.2)\n\n ax.set_title(\"Sentiment Analysis of @{}'s tweets\".format(self.get_username(),)) \n ax.set_xlabel(\"Date\") \n ax.set_ylabel(\"Sentiment Value\") \n ax.xaxis.set_major_locator(plt.MaxNLocator(10))\n \n for tick in ax.get_xticklabels():\n tick.set_rotation(45)\n\n 
ax.plot(self.plotData[0],self.plotData[1],\"-bo\",label='Sentiment Score') \n ax.plot(self.plotData[0],self.plotData[2], \"-ro\",label='Sentiment Magnitude')\n ax.legend(loc=\"lower right\")\n self.canvas.draw()\n self.enableExport()\n\n\n '''\n Gets username from text field\n Input - self:Ui_Window\n Output - string\n '''\n def get_username(self):\n return (self.ui.usernameLineEdit.text())\n\n '''\n Gets start date from spin boxes\n Input - self:Ui_Window\n Output - datetime.datetime\n '''\n def get_start_date(self):\n start_month = self.ui.startMonthSpinBox.value()\n start_day = self.ui.startDaySpinBox.value()\n start_year = self.ui.startYearSpinBox.value()\n \n try:\n startDate = datetime.datetime(start_year, start_month,start_day)\n except:\n self.printMessages(['Start date is improperly set. Check to see that the date is correct/exists.'])\n return None\n \n return startDate\n\n '''\n Gets end date from spin boxes\n Input - self:Ui_Window\n Output - datetime.datetime\n '''\n def get_end_date(self):\n end_month = self.ui.endMonthSpinBox.value()\n end_day = self.ui.endDaySpinBox.value()\n end_year = self.ui.endYearSpinBox.value()\n \n try:\n endDate = datetime.datetime(end_year, end_month,end_day)\n except:\n self.printMessages(['End date is improperly set. Check to see that the date is correct/exists.'])\n return None\n \n return endDate\n\n '''\n Toggles the export button.\n Input - self:Ui_Window\n Output - None\n '''\n def enableExport(self):\n self.ui.exportPushButton.setEnabled(True)\n\n '''\n Exports date, score/magntitude, and tweet text to csv and pops up a window when done\n Input - self:Ui_Window\n Output - None\n '''\n def exportValues(self):\n currentTimeDate = datetime.datetime.now()\n currentTimeDate = str(currentTimeDate.year)+'-'+str(currentTimeDate.month)+'-'+str(currentTimeDate.day)+'-'+str(currentTimeDate.hour)+'-'+str(currentTimeDate.minute)+'-'+str(currentTimeDate.second)\n\n with open(currentTimeDate+'_'+self.get_username()+'_score.csv', mode='w') as score_file:\n writer = csv.writer(score_file)\n for i in range(len(self.plotData[0])):\n writer.writerow( [ str(self.plotData[0][i]), self.plotData[1][i], \n self.tweetList[i].full_text.encode(encoding='UTF-8', errors='replace') ] )\n\n with open(currentTimeDate+'_'+self.get_username()+'_magnitude.csv', mode='w') as magnitude_file:\n writer = csv.writer(magnitude_file)\n for i in range(len(self.plotData[0])):\n writer.writerow( [ str(self.plotData[0][i]), self.plotData[2][i], \n self.tweetList[i].full_text.encode(encoding='UTF-8', errors='replace') ] )\n\n msgBox = QMessageBox()\n msgBox.setText('CSV files exported!')\n msgBox.exec()\n\n '''\n Prints out messages in a pop up window\n Input - self:Ui_Window\n Output - None\n '''\n def printMessages(self, messageList):\n msgBox = QMessageBox()\n msgBox.setIcon(QMessageBox.Critical)\n msgBox.setWindowTitle('Errors occured!')\n tempString = ''\n\n for message in messageList:\n tempString += (message + '\\n')\n msgBox.setText(tempString)\n msgBox.exec()\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n window = Ui_Window()\n window.show()\n\n sys.exit(app.exec_())",
"step-ids": [
9,
11,
12,
13,
14
]
}
|
[
9,
11,
12,
13,
14
] |
import pygame
from evolution import Darwin
from Sensor import Robot, obstacleArray
# Game Settings
pygame.init()
background_colour = (0, 0, 0)
(width, height) = (1000, 600)
target_location = (800, 300)
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("Omar's Simulation")
screen.fill(background_colour)
# GA Hyper parameters
population_size = 50
elitism = 4
# Agent Initialisation
robots = []
for i in range(population_size):
robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))
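# GA controller: applies elitism and mutation to evolve the robot population between generations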
darwin = Darwin(robot_array=robots, population_size=population_size, elitism=4, mutation_rate=0.1)
if __name__ == '__main__':
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
screen.fill(background_colour)
pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, height - 20), 1)
pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)
# pygame.draw.line(screen, (255, 0, 0), (800, 10), (800, 590))
for obstacle in obstacleArray:
obstacle.drawShape()
# obstacle.move_y()
# pygame.draw.circle(screen, (0, 0, 255), (500, 300), 100, 0)
# pygame.draw.circle(screen, (0, 255, 20), (200, 300), 75, 0)
# pygame.draw.polygon(screen, (255, 255, 255), new_list, 1)
# for pedestrian in all.start_pedestrians:
# pedestrian.move()
# pedestrian.update()
# all.introduce()
for robot in darwin.robot_array:
robot.move()
robot.update()
robot.collide()
robot.evaluate_fitness()
if darwin.check_if_all_dead():
darwin.get_stats()
darwin.make_next_generation()
pygame.display.update()
|
normal
|
{
"blob_id": "cbcbc0d01c32693ebbdbcf285efdc8e521c447ee",
"index": 3998,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npygame.init()\n<mask token>\npygame.display.set_caption(\"Omar's Simulation\")\nscreen.fill(background_colour)\n<mask token>\nfor i in range(population_size):\n robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))\n<mask token>\nif __name__ == '__main__':\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n screen.fill(background_colour)\n pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, \n height - 20), 1)\n pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)\n for obstacle in obstacleArray:\n obstacle.drawShape()\n for robot in darwin.robot_array:\n robot.move()\n robot.update()\n robot.collide()\n robot.evaluate_fitness()\n if darwin.check_if_all_dead():\n darwin.get_stats()\n darwin.make_next_generation()\n pygame.display.update()\n",
"step-3": "<mask token>\npygame.init()\nbackground_colour = 0, 0, 0\nwidth, height = 1000, 600\ntarget_location = 800, 300\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Omar's Simulation\")\nscreen.fill(background_colour)\npopulation_size = 50\nelitism = 4\nrobots = []\nfor i in range(population_size):\n robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))\ndarwin = Darwin(robot_array=robots, population_size=population_size,\n elitism=4, mutation_rate=0.1)\nif __name__ == '__main__':\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n screen.fill(background_colour)\n pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, \n height - 20), 1)\n pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)\n for obstacle in obstacleArray:\n obstacle.drawShape()\n for robot in darwin.robot_array:\n robot.move()\n robot.update()\n robot.collide()\n robot.evaluate_fitness()\n if darwin.check_if_all_dead():\n darwin.get_stats()\n darwin.make_next_generation()\n pygame.display.update()\n",
"step-4": "import pygame\nfrom evolution import Darwin\nfrom Sensor import Robot, obstacleArray\npygame.init()\nbackground_colour = 0, 0, 0\nwidth, height = 1000, 600\ntarget_location = 800, 300\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Omar's Simulation\")\nscreen.fill(background_colour)\npopulation_size = 50\nelitism = 4\nrobots = []\nfor i in range(population_size):\n robots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))\ndarwin = Darwin(robot_array=robots, population_size=population_size,\n elitism=4, mutation_rate=0.1)\nif __name__ == '__main__':\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n screen.fill(background_colour)\n pygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, \n height - 20), 1)\n pygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)\n for obstacle in obstacleArray:\n obstacle.drawShape()\n for robot in darwin.robot_array:\n robot.move()\n robot.update()\n robot.collide()\n robot.evaluate_fitness()\n if darwin.check_if_all_dead():\n darwin.get_stats()\n darwin.make_next_generation()\n pygame.display.update()\n",
"step-5": "import pygame\nfrom evolution import Darwin\nfrom Sensor import Robot, obstacleArray\n\n\n# Game Settings\npygame.init()\nbackground_colour = (0, 0, 0)\n(width, height) = (1000, 600)\ntarget_location = (800, 300)\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption(\"Omar's Simulation\")\nscreen.fill(background_colour)\n\n\n# GA Hyper parameters\npopulation_size = 50\nelitism = 4\n\n# Agent Initialisation\nrobots = []\nfor i in range(population_size):\n\trobots.append(Robot(175, 300, 10, 360, 9, all, set_weights=None))\ndarwin = Darwin(robot_array=robots, population_size=population_size, elitism=4, mutation_rate=0.1)\n\n\n\nif __name__ == '__main__':\n\trunning = True\n\twhile running:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trunning = False\n\t\tscreen.fill(background_colour)\n\t\tpygame.draw.rect(screen, (255, 255, 255), (10, 10, width - 20, height - 20), 1)\n\t\tpygame.draw.circle(screen, (255, 10, 0), target_location, 10, 0)\n\t\t# pygame.draw.line(screen, (255, 0, 0), (800, 10), (800, 590))\n\t\tfor obstacle in obstacleArray:\n\t\t\tobstacle.drawShape()\n\t\t# obstacle.move_y()\n\t\t# pygame.draw.circle(screen, (0, 0, 255), (500, 300), 100, 0)\n\t\t# pygame.draw.circle(screen, (0, 255, 20), (200, 300), 75, 0)\n\t\t# pygame.draw.polygon(screen, (255, 255, 255), new_list, 1)\n\t\t# for pedestrian in all.start_pedestrians:\n\t\t# \t\tpedestrian.move()\n\t\t# \t\tpedestrian.update()\n\t\t# \t\tall.introduce()\n\t\tfor robot in darwin.robot_array:\n\t\t\trobot.move()\n\t\t\trobot.update()\n\t\t\trobot.collide()\n\t\t\trobot.evaluate_fitness()\n\t\tif darwin.check_if_all_dead():\n\t\t\tdarwin.get_stats()\n\t\t\tdarwin.make_next_generation()\n\t\tpygame.display.update()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def exec(bucket_id, project_id, reverse_opt):
client = storage.Client()
bucket = client.bucket(bucket_id, user_project=project_id)
blobs = bucket.list_blobs()
blob_list = []
try:
for blob in blobs:
this_blob = {'name': blob.name, 'owner': blob.owner, 'class':
blob.storage_class, 'size': blob.size, 'date': str(blob.
updated).split('.')[0].split('+')[0]}
blob_list.append(this_blob)
except Exception as e:
print(e)
exit(1)
    sorted_list = sorted(blob_list, key=lambda k: k['name'], reverse=
        reverse_opt)
report_table = PrettyTable()
report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']
report_table.align['NAME'] = 'l'
report_table.align['SIZE'] = 'r'
report_table.align['DATE'] = 'r'
for blob in sorted_list:
report_table.add_row([blob['name'], blob['owner'], blob['class'],
str(byte.convert_size(blob['size'])), blob['date']])
print(report_table)
<|reserved_special_token_1|>
from lib import byte
from google.cloud import storage
from prettytable import PrettyTable
def exec(bucket_id, project_id, reverse_opt):
client = storage.Client()
bucket = client.bucket(bucket_id, user_project=project_id)
blobs = bucket.list_blobs()
blob_list = []
try:
for blob in blobs:
this_blob = {'name': blob.name, 'owner': blob.owner, 'class':
blob.storage_class, 'size': blob.size, 'date': str(blob.
updated).split('.')[0].split('+')[0]}
blob_list.append(this_blob)
except Exception as e:
print(e)
exit(1)
    sorted_list = sorted(blob_list, key=lambda k: k['name'], reverse=
        reverse_opt)
report_table = PrettyTable()
report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']
report_table.align['NAME'] = 'l'
report_table.align['SIZE'] = 'r'
report_table.align['DATE'] = 'r'
for blob in sorted_list:
report_table.add_row([blob['name'], blob['owner'], blob['class'],
str(byte.convert_size(blob['size'])), blob['date']])
print(report_table)
<|reserved_special_token_1|>
## Filename: name.py
# Author: Marcelo Feitoza Parisi
#
# Description: Report the objects
# on the bucket sorted by name.
#
# ###########################
# # DISCLAIMER - IMPORTANT! #
# ###########################
#
# Stuff found here was built as a
# Proof-Of-Concept or Study material
# and should not be considered
# production ready!
#
# USE WITH CARE!
##
from lib import byte
from google.cloud import storage
from prettytable import PrettyTable
def exec(bucket_id, project_id, reverse_opt):
# Google Cloud Storage Client
client = storage.Client()
bucket = client.bucket(bucket_id, user_project=project_id)
blobs = bucket.list_blobs()
# Will hold our local list of objects
blob_list = []
try:
for blob in blobs:
# For each object we'll save name, owner, class, size and date
this_blob = { 'name': blob.name,
'owner': blob.owner,
'class': blob.storage_class,
'size' : blob.size,
'date' : str(blob.updated).split('.')[0].split('+')[0]
}
# Append object to our list
blob_list.append(this_blob)
except Exception as e:
print(e)
exit(1)
# Sort our object list by name using our reverse_opt
    sorted_list = sorted(blob_list, key=lambda k: k['name'], reverse=reverse_opt)
# Generating our PrettyTable
report_table = PrettyTable()
report_table.field_names = ["NAME", "OWNER", "CLASS", "SIZE", "DATE"]
report_table.align["NAME"] = "l"
report_table.align["SIZE"] = "r"
report_table.align["DATE"] = "r"
for blob in sorted_list:
report_table.add_row([blob['name'], blob['owner'], blob['class'], str(byte.convert_size(blob['size'])), blob['date']])
print(report_table)
|
flexible
|
{
"blob_id": "562b2c3567e42699cfd0804a5780af7ede142e13",
"index": 1056,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef exec(bucket_id, project_id, reverse_opt):\n client = storage.Client()\n bucket = client.bucket(bucket_id, user_project=project_id)\n blobs = bucket.list_blobs()\n blob_list = []\n try:\n for blob in blobs:\n this_blob = {'name': blob.name, 'owner': blob.owner, 'class':\n blob.storage_class, 'size': blob.size, 'date': str(blob.\n updated).split('.')[0].split('+')[0]}\n blob_list.append(this_blob)\n except Exception as e:\n print(e)\n exit(1)\n sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=\n reverse_opt)\n report_table = PrettyTable()\n report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']\n report_table.align['NAME'] = 'l'\n report_table.align['SIZE'] = 'r'\n report_table.align['DATE'] = 'r'\n for blob in sorted_list:\n report_table.add_row([blob['name'], blob['owner'], blob['class'],\n str(byte.convert_size(blob['size'])), blob['date']])\n print(report_table)\n",
"step-3": "from lib import byte\nfrom google.cloud import storage\nfrom prettytable import PrettyTable\n\n\ndef exec(bucket_id, project_id, reverse_opt):\n client = storage.Client()\n bucket = client.bucket(bucket_id, user_project=project_id)\n blobs = bucket.list_blobs()\n blob_list = []\n try:\n for blob in blobs:\n this_blob = {'name': blob.name, 'owner': blob.owner, 'class':\n blob.storage_class, 'size': blob.size, 'date': str(blob.\n updated).split('.')[0].split('+')[0]}\n blob_list.append(this_blob)\n except Exception as e:\n print(e)\n exit(1)\n sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=\n reverse_opt)\n report_table = PrettyTable()\n report_table.field_names = ['NAME', 'OWNER', 'CLASS', 'SIZE', 'DATE']\n report_table.align['NAME'] = 'l'\n report_table.align['SIZE'] = 'r'\n report_table.align['DATE'] = 'r'\n for blob in sorted_list:\n report_table.add_row([blob['name'], blob['owner'], blob['class'],\n str(byte.convert_size(blob['size'])), blob['date']])\n print(report_table)\n",
"step-4": "## Filename: name.py\n # Author: Marcelo Feitoza Parisi\n # \n # Description: Report the objects\n # on the bucket sorted by name.\n # \n # ###########################\n # # DISCLAIMER - IMPORTANT! #\n # ###########################\n # \n # Stuff found here was built as a\n # Proof-Of-Concept or Study material\n # and should not be considered\n # production ready!\n # \n # USE WITH CARE!\n##\nfrom lib import byte\nfrom google.cloud import storage\nfrom prettytable import PrettyTable\n\ndef exec(bucket_id, project_id, reverse_opt):\n\n # Google Cloud Storage Client\n client = storage.Client()\n bucket = client.bucket(bucket_id, user_project=project_id)\n blobs = bucket.list_blobs()\n\n # Will hold our local list of objects\n blob_list = []\n\n try: \n for blob in blobs:\n # For each object we'll save name, owner, class, size and date\n this_blob = { 'name': blob.name,\n 'owner': blob.owner,\n 'class': blob.storage_class,\n 'size' : blob.size,\n 'date' : str(blob.updated).split('.')[0].split('+')[0]\n }\n # Append object to our list\n blob_list.append(this_blob)\n except Exception as e:\n print(e)\n exit(1)\n\n # Sort our object list by name using our reverse_opt\n sorted_list = sorted(blob_list, key=lambda k: blob.name, reverse=reverse_opt)\n\n # Generating our PrettyTable\n report_table = PrettyTable()\n report_table.field_names = [\"NAME\", \"OWNER\", \"CLASS\", \"SIZE\", \"DATE\"]\n report_table.align[\"NAME\"] = \"l\"\n report_table.align[\"SIZE\"] = \"r\"\n report_table.align[\"DATE\"] = \"r\"\n for blob in sorted_list:\n report_table.add_row([blob['name'], blob['owner'], blob['class'], str(byte.convert_size(blob['size'])), blob['date']])\n\n print(report_table)\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 20:47:28 2019
@author: jaco
"""
|
flexible
|
{
"blob_id": "d806d1b31712e3d8d60f4bfbc60c6939dfeeb357",
"index": 9579,
"step-1": "<mask token>\n",
"step-2": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 16 20:47:28 2019\n\n@author: jaco\n\"\"\"\n\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
@plugin_runner.register(chain='scrooge')
def ralph3_profit_center(**kwargs):
new_pc = total = 0
for pc in get_from_ralph('profit-centers', logger):
created = update_profit_center(pc)
if created:
new_pc += 1
total += 1
return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,
total - new_pc, total)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@transaction.atomic
def update_profit_center(pc):
profit_center, created = ProfitCenter.objects.get_or_create(ralph3_id=
pc['id'], defaults=dict(name=pc['name']))
profit_center.name = pc['name']
profit_center.description = pc['description']
profit_center.save()
return created
@plugin_runner.register(chain='scrooge')
def ralph3_profit_center(**kwargs):
new_pc = total = 0
for pc in get_from_ralph('profit-centers', logger):
created = update_profit_center(pc)
if created:
new_pc += 1
total += 1
return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,
total - new_pc, total)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logger = logging.getLogger(__name__)
@transaction.atomic
def update_profit_center(pc):
profit_center, created = ProfitCenter.objects.get_or_create(ralph3_id=
pc['id'], defaults=dict(name=pc['name']))
profit_center.name = pc['name']
profit_center.description = pc['description']
profit_center.save()
return created
@plugin_runner.register(chain='scrooge')
def ralph3_profit_center(**kwargs):
new_pc = total = 0
for pc in get_from_ralph('profit-centers', logger):
created = update_profit_center(pc)
if created:
new_pc += 1
total += 1
return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,
total - new_pc, total)
<|reserved_special_token_1|>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from django.db import transaction
from ralph_scrooge.models import ProfitCenter
from ralph_scrooge.plugins import plugin_runner
from ralph_scrooge.plugins.collect.utils import get_from_ralph
logger = logging.getLogger(__name__)
@transaction.atomic
def update_profit_center(pc):
profit_center, created = ProfitCenter.objects.get_or_create(ralph3_id=
pc['id'], defaults=dict(name=pc['name']))
profit_center.name = pc['name']
profit_center.description = pc['description']
profit_center.save()
return created
@plugin_runner.register(chain='scrooge')
def ralph3_profit_center(**kwargs):
new_pc = total = 0
for pc in get_from_ralph('profit-centers', logger):
created = update_profit_center(pc)
if created:
new_pc += 1
total += 1
return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,
total - new_pc, total)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from django.db import transaction
from ralph_scrooge.models import ProfitCenter
from ralph_scrooge.plugins import plugin_runner
from ralph_scrooge.plugins.collect.utils import get_from_ralph
logger = logging.getLogger(__name__)
@transaction.atomic
def update_profit_center(pc):
profit_center, created = ProfitCenter.objects.get_or_create(
ralph3_id=pc['id'],
defaults=dict(
name=pc['name'],
)
)
profit_center.name = pc['name']
profit_center.description = pc['description']
profit_center.save()
return created
@plugin_runner.register(chain='scrooge')
def ralph3_profit_center(**kwargs):
new_pc = total = 0
for pc in get_from_ralph("profit-centers", logger):
created = update_profit_center(pc)
if created:
new_pc += 1
total += 1
return True, '{} new profit center(s), {} updated, {} total'.format(
new_pc,
total - new_pc,
total,
)
|
flexible
|
{
"blob_id": "d3f52d4713ba4b7b4cd736b26809968e259be63c",
"index": 6883,
"step-1": "<mask token>\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph('profit-centers', logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,\n total - new_pc, total)\n",
"step-2": "<mask token>\n\n\[email protected]\ndef update_profit_center(pc):\n profit_center, created = ProfitCenter.objects.get_or_create(ralph3_id=\n pc['id'], defaults=dict(name=pc['name']))\n profit_center.name = pc['name']\n profit_center.description = pc['description']\n profit_center.save()\n return created\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph('profit-centers', logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,\n total - new_pc, total)\n",
"step-3": "<mask token>\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef update_profit_center(pc):\n profit_center, created = ProfitCenter.objects.get_or_create(ralph3_id=\n pc['id'], defaults=dict(name=pc['name']))\n profit_center.name = pc['name']\n profit_center.description = pc['description']\n profit_center.save()\n return created\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph('profit-centers', logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,\n total - new_pc, total)\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nimport logging\nfrom django.db import transaction\nfrom ralph_scrooge.models import ProfitCenter\nfrom ralph_scrooge.plugins import plugin_runner\nfrom ralph_scrooge.plugins.collect.utils import get_from_ralph\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef update_profit_center(pc):\n profit_center, created = ProfitCenter.objects.get_or_create(ralph3_id=\n pc['id'], defaults=dict(name=pc['name']))\n profit_center.name = pc['name']\n profit_center.description = pc['description']\n profit_center.save()\n return created\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph('profit-centers', logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(new_pc,\n total - new_pc, total)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom django.db import transaction\n\nfrom ralph_scrooge.models import ProfitCenter\nfrom ralph_scrooge.plugins import plugin_runner\nfrom ralph_scrooge.plugins.collect.utils import get_from_ralph\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]\ndef update_profit_center(pc):\n profit_center, created = ProfitCenter.objects.get_or_create(\n ralph3_id=pc['id'],\n defaults=dict(\n name=pc['name'],\n )\n )\n profit_center.name = pc['name']\n profit_center.description = pc['description']\n profit_center.save()\n return created\n\n\n@plugin_runner.register(chain='scrooge')\ndef ralph3_profit_center(**kwargs):\n new_pc = total = 0\n for pc in get_from_ralph(\"profit-centers\", logger):\n created = update_profit_center(pc)\n if created:\n new_pc += 1\n total += 1\n return True, '{} new profit center(s), {} updated, {} total'.format(\n new_pc,\n total - new_pc,\n total,\n )\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) < 2:
print('Syntax : python %s <port>') % str(sys.argv[0])
else:
print('-' * 55)
print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')
print('-' * 55)
r = requests.session()
port = str(sys.argv[1])
url = 'http://docker.hackthebox.eu:'
url = url + port
uri = '/portfolio.php?id=1'
url = url + uri
print('[*]SQLi Affected URI : %s') % uri
print('[*]Counting Columns')
for x in range(1, 20):
payload = ' order by %i --+' % x
nurl = url + payload
op = r.get(nurl)
soup = BeautifulSoup(op.text, 'html.parser')
soup = soup.find('p')
soup = str(soup)
size = len(soup.split())
print('[*]Page size at order by %s : %s') % (x, size)
if size < 36:
col = x - 1
break
print('-' * 55)
print('[*]Number of Columns : %d') % col
print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')
print("[*]Trying to read content of '/var/www/html/administrat/panel.php'")
upayload = ' union all select 1'
for x in range(2, col + 1):
x = str(x)
upayload = upayload + ',' + x
<|reserved_special_token_0|>
print('[*]Executing. : %s') % url
<|reserved_special_token_0|>
if op.find('2'):
print('[*]Column 2 is reflected')
print('[*]Injecting payloads in column 2....')
<|reserved_special_token_0|>
print('[*]Excecuting : %s') % url
<|reserved_special_token_0|>
print('-' * 55)
print('[*]Flag : %s') % op
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(sys.argv) < 2:
print('Syntax : python %s <port>') % str(sys.argv[0])
else:
print('-' * 55)
print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')
print('-' * 55)
r = requests.session()
port = str(sys.argv[1])
url = 'http://docker.hackthebox.eu:'
url = url + port
uri = '/portfolio.php?id=1'
url = url + uri
print('[*]SQLi Affected URI : %s') % uri
print('[*]Counting Columns')
for x in range(1, 20):
payload = ' order by %i --+' % x
nurl = url + payload
op = r.get(nurl)
soup = BeautifulSoup(op.text, 'html.parser')
soup = soup.find('p')
soup = str(soup)
size = len(soup.split())
print('[*]Page size at order by %s : %s') % (x, size)
if size < 36:
col = x - 1
break
print('-' * 55)
print('[*]Number of Columns : %d') % col
print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')
print("[*]Trying to read content of '/var/www/html/administrat/panel.php'")
upayload = ' union all select 1'
for x in range(2, col + 1):
x = str(x)
upayload = upayload + ',' + x
upayload = upayload + ' --+'
url = url + upayload
print('[*]Executing. : %s') % url
op = r.get(url)
op = str(op.text)
if op.find('2'):
print('[*]Column 2 is reflected')
print('[*]Injecting payloads in column 2....')
upayload = upayload.replace('2',
"load_file('/var/www/html/administrat/panel.php')")
url = 'http://docker.hackthebox.eu:' + port + uri + upayload
print('[*]Excecuting : %s') % url
op = r.get(url)
op = str(op.text)
op = re.search('HTB.*?<', op)
op = str(op.group())
op = op.replace('<', '')
print('-' * 55)
print('[*]Flag : %s') % op
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
import sys
import re
if len(sys.argv) < 2:
print('Syntax : python %s <port>') % str(sys.argv[0])
else:
print('-' * 55)
print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')
print('-' * 55)
r = requests.session()
port = str(sys.argv[1])
url = 'http://docker.hackthebox.eu:'
url = url + port
uri = '/portfolio.php?id=1'
url = url + uri
print('[*]SQLi Affected URI : %s') % uri
print('[*]Counting Columns')
for x in range(1, 20):
payload = ' order by %i --+' % x
nurl = url + payload
op = r.get(nurl)
soup = BeautifulSoup(op.text, 'html.parser')
soup = soup.find('p')
soup = str(soup)
size = len(soup.split())
print('[*]Page size at order by %s : %s') % (x, size)
if size < 36:
col = x - 1
break
print('-' * 55)
print('[*]Number of Columns : %d') % col
print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')
print("[*]Trying to read content of '/var/www/html/administrat/panel.php'")
upayload = ' union all select 1'
for x in range(2, col + 1):
x = str(x)
upayload = upayload + ',' + x
upayload = upayload + ' --+'
url = url + upayload
print('[*]Executing. : %s') % url
op = r.get(url)
op = str(op.text)
if op.find('2'):
print('[*]Column 2 is reflected')
print('[*]Injecting payloads in column 2....')
upayload = upayload.replace('2',
"load_file('/var/www/html/administrat/panel.php')")
url = 'http://docker.hackthebox.eu:' + port + uri + upayload
print('[*]Excecuting : %s') % url
op = r.get(url)
op = str(op.text)
op = re.search('HTB.*?<', op)
op = str(op.group())
op = op.replace('<', '')
print('-' * 55)
print('[*]Flag : %s') % op
<|reserved_special_token_1|>
import requests
from bs4 import BeautifulSoup
import sys
import re
if len(sys.argv)<2:
print("Syntax : python %s <port>")%(str(sys.argv[0]))
else:
print('-'*55)
print("HTB WEB-CHALLENGE coded by ZyperX [Freelance]")
print('-'*55)
r=requests.session()
port=str(sys.argv[1])
url="http://docker.hackthebox.eu:"
url=url+port
uri="/portfolio.php?id=1"
url=url+uri
print("[*]SQLi Affected URI : %s")%(uri)
print("[*]Counting Columns")
for x in range(1,20):
payload=(" order by %i --+")%(x)
nurl=url+payload
op=r.get(nurl)
soup=BeautifulSoup(op.text,'html.parser')
soup=soup.find('p')
soup=str(soup)
size=len(soup.split())
print("[*]Page size at order by %s : %s")%(x,size)
if size < 36 :
col= x-1
break
print("-"*55)
print("[*]Number of Columns : %d")%(col)
print("[*]Web App Vulnerable with FILE PRIVILEGE SQLI")
print("[*]Trying to read content of \'/var/www/html/administrat/panel.php\'")
upayload=" union all select 1"
for x in range(2,col+1):
x=str(x)
upayload=upayload+","+x
upayload=upayload+" --+"
url=url+upayload
print("[*]Executing. : %s")%(url)
op=r.get(url)
op=str(op.text)
if op.find("2"):
print("[*]Column 2 is reflected");
print("[*]Injecting payloads in column 2....");
upayload=upayload.replace('2','load_file(\'/var/www/html/administrat/panel.php\')')
url="http://docker.hackthebox.eu:"+port+uri+upayload
print("[*]Excecuting : %s")%(url)
op=r.get(url)
op=str(op.text)
op=re.search("HTB.*?<",op)
op=str(op.group())
op=op.replace('<','')
print("-"*55)
print("[*]Flag : %s")%(op)
|
flexible
|
{
"blob_id": "88ec9484e934ce27b13734ca26f79df71b7677e6",
"index": 82,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\n<mask token>\nprint('[*]Executing. : %s') % url\n<mask token>\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\n<mask token>\nprint('[*]Excecuting : %s') % url\n<mask token>\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n",
"step-3": "<mask token>\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\nupayload = upayload + ' --+'\nurl = url + upayload\nprint('[*]Executing. : %s') % url\nop = r.get(url)\nop = str(op.text)\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\nupayload = upayload.replace('2',\n \"load_file('/var/www/html/administrat/panel.php')\")\nurl = 'http://docker.hackthebox.eu:' + port + uri + upayload\nprint('[*]Excecuting : %s') % url\nop = r.get(url)\nop = str(op.text)\nop = re.search('HTB.*?<', op)\nop = str(op.group())\nop = op.replace('<', '')\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n",
"step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nif len(sys.argv) < 2:\n print('Syntax : python %s <port>') % str(sys.argv[0])\nelse:\n print('-' * 55)\n print('HTB WEB-CHALLENGE coded by ZyperX [Freelance]')\n print('-' * 55)\n r = requests.session()\n port = str(sys.argv[1])\n url = 'http://docker.hackthebox.eu:'\n url = url + port\n uri = '/portfolio.php?id=1'\n url = url + uri\n print('[*]SQLi Affected URI : %s') % uri\n print('[*]Counting Columns')\n for x in range(1, 20):\n payload = ' order by %i --+' % x\n nurl = url + payload\n op = r.get(nurl)\n soup = BeautifulSoup(op.text, 'html.parser')\n soup = soup.find('p')\n soup = str(soup)\n size = len(soup.split())\n print('[*]Page size at order by %s : %s') % (x, size)\n if size < 36:\n col = x - 1\n break\n print('-' * 55)\n print('[*]Number of Columns : %d') % col\n print('[*]Web App Vulnerable with FILE PRIVILEGE SQLI')\n print(\"[*]Trying to read content of '/var/www/html/administrat/panel.php'\")\n upayload = ' union all select 1'\n for x in range(2, col + 1):\n x = str(x)\n upayload = upayload + ',' + x\nupayload = upayload + ' --+'\nurl = url + upayload\nprint('[*]Executing. : %s') % url\nop = r.get(url)\nop = str(op.text)\nif op.find('2'):\n print('[*]Column 2 is reflected')\n print('[*]Injecting payloads in column 2....')\nupayload = upayload.replace('2',\n \"load_file('/var/www/html/administrat/panel.php')\")\nurl = 'http://docker.hackthebox.eu:' + port + uri + upayload\nprint('[*]Excecuting : %s') % url\nop = r.get(url)\nop = str(op.text)\nop = re.search('HTB.*?<', op)\nop = str(op.group())\nop = op.replace('<', '')\nprint('-' * 55)\nprint('[*]Flag : %s') % op\n",
"step-5": "import requests\nfrom bs4 import BeautifulSoup\nimport sys\nimport re\nif len(sys.argv)<2:\n print(\"Syntax : python %s <port>\")%(str(sys.argv[0]))\nelse:\n print('-'*55)\n print(\"HTB WEB-CHALLENGE coded by ZyperX [Freelance]\")\n print('-'*55)\n r=requests.session()\n port=str(sys.argv[1])\n url=\"http://docker.hackthebox.eu:\"\n url=url+port\n uri=\"/portfolio.php?id=1\"\n url=url+uri\n print(\"[*]SQLi Affected URI : %s\")%(uri)\n print(\"[*]Counting Columns\")\n for x in range(1,20):\n payload=(\" order by %i --+\")%(x)\n nurl=url+payload\n op=r.get(nurl)\n soup=BeautifulSoup(op.text,'html.parser')\n soup=soup.find('p')\n soup=str(soup)\n size=len(soup.split())\n print(\"[*]Page size at order by %s : %s\")%(x,size)\n if size < 36 :\n col= x-1\n break \n print(\"-\"*55)\n print(\"[*]Number of Columns : %d\")%(col)\n print(\"[*]Web App Vulnerable with FILE PRIVILEGE SQLI\")\n print(\"[*]Trying to read content of \\'/var/www/html/administrat/panel.php\\'\")\n upayload=\" union all select 1\"\n for x in range(2,col+1):\n x=str(x)\n upayload=upayload+\",\"+x\nupayload=upayload+\" --+\"\nurl=url+upayload\nprint(\"[*]Executing. : %s\")%(url)\nop=r.get(url)\nop=str(op.text)\nif op.find(\"2\"):\n print(\"[*]Column 2 is reflected\");\n print(\"[*]Injecting payloads in column 2....\");\nupayload=upayload.replace('2','load_file(\\'/var/www/html/administrat/panel.php\\')')\nurl=\"http://docker.hackthebox.eu:\"+port+uri+upayload\nprint(\"[*]Excecuting : %s\")%(url)\nop=r.get(url)\nop=str(op.text)\nop=re.search(\"HTB.*?<\",op)\nop=str(op.group())\nop=op.replace('<','')\nprint(\"-\"*55)\nprint(\"[*]Flag : %s\")%(op)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
stock_in = By.XPATH, "//android.widget.TextView[contains(@text,'门店入库')]"
transfer_in = By.XPATH, "//android.widget.TextView[contains(@text,'调拨入库')]"
take_receive = By.ID, '%s:id/take_receive' % basePage.package_name
details_text = By.ID, '%s:id/details_text' % basePage.package_name
transfer_confirm_state = By.ID, '%s:id/state'
transfer_diff_wizard = By.ID, '%s:id/multiple_dialog_container'
text_confirm_button = By.ID, '%s:id/text_confirm'
diff_confirm_button = (By.XPATH,
"//android.widget.TextView[contains(@text,'差异收货')]")
state_num = random.randint(1, 4)
order_of_state = By.XPATH, '//android.widget.TextView[%s]' % state_num
title = By.ID, '%s:id/title' % basePage.package_name
fold_image = By.ID, '%s:id/fold_image' % basePage.package_name
order_search = By.ID, '%s:id/order_search' % basePage.package_name
search_query = By.ID, '%s:id/search_query' % basePage.package_name
search_order_no = By.ID, '%s:id/search_order_no' % basePage.package_name
search_order_sku = By.ID, '%s:id/search_order_sku' % basePage.package_name
search_order_org = By.ID, '%s:id/search_order_org' % basePage.package_name
type_edit = By.ID, '%s:id/type_edit' % basePage.package_name
transfer_options1 = By.ID, '%s:id/options1' % basePage.package_name
transfer_options_submit = By.ID, '%s:id/btnSubmit' % basePage.package_name
all_check = By.ID, '%s:id/all_check' % basePage.package_name
out_check = By.ID, '%s:id/out_check' % basePage.package_name
in_check = By.ID, '%s:id/in_check' % basePage.package_name
operate_edit = By.ID, '%s:id/operate_edit' % basePage.package_name
search_clear = By.ID, '%s:id/search_clear' % basePage.package_name
search_up_cancel = By.ID, '%s:id/search_up_cancel' % basePage.package_name
order_state = By.XPATH, "//android.widget.TextView[contains(@text,'已完成')]"
allocate_name = By.ID, '%s:id/allocate_name' % basePage.package_name
start_at = By.ID, '%s:id/start_at' % basePage.package_name
end_at = By.ID, '%s:id/end_at' % basePage.package_name
day = By.ID, '%s:id/day' % basePage.package_name
btn_view_diff = By.CLASS_NAME, 'btn-view-diff'
searchIcon = By.ID, 'searchIcon'
input_item = By.CLASS_NAME, 'input-item'
icon_delete = (By.XPATH,
"//div[@class='keyboard']/div[1]/img[@class='icon-delete']")
back_btn = By.XPATH, "//div[@class='icon-back']/img[@alt='<']"
btn_save = By.CLASS_NAME, 'btn-save'
add_handle = By.XPATH, "//div[@class='before-focus']/div[1]"
add_border_node = By.XPATH, "//div[@class='before-focus']/div[2]"
loggingimport = By.XPATH, "//div[@class='before-focus']/div[3]"
btn_more = By.CLASS_NAME, 'btn-more'
btn_close_native = By.CLASS_NAME, 'btn-close-native'
icon_edit = (By.XPATH,
"//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]"
)
div_num = random.randint(1, 9)
num_key = By.XPATH, "//div[@class='keyboard']/div[2]/div[%s]" % div_num
num_keys = By.XPATH, "//div[@class='keyboard']/div[2]"
key_confirm = By.XPATH, "//div[@class='keyboard']/div[2]/div[12]"
result_item = By.CLASS_NAME, 'result-item'
<|reserved_special_token_1|>
from selenium.webdriver.common.by import By
import random
import basePage
stock_in = By.XPATH, "//android.widget.TextView[contains(@text,'门店入库')]"
transfer_in = By.XPATH, "//android.widget.TextView[contains(@text,'调拨入库')]"
take_receive = By.ID, '%s:id/take_receive' % basePage.package_name
details_text = By.ID, '%s:id/details_text' % basePage.package_name
transfer_confirm_state = By.ID, '%s:id/state'
transfer_diff_wizard = By.ID, '%s:id/multiple_dialog_container'
text_confirm_button = By.ID, '%s:id/text_confirm'
diff_confirm_button = (By.XPATH,
"//android.widget.TextView[contains(@text,'差异收货')]")
state_num = random.randint(1, 4)
order_of_state = By.XPATH, '//android.widget.TextView[%s]' % state_num
title = By.ID, '%s:id/title' % basePage.package_name
fold_image = By.ID, '%s:id/fold_image' % basePage.package_name
order_search = By.ID, '%s:id/order_search' % basePage.package_name
search_query = By.ID, '%s:id/search_query' % basePage.package_name
search_order_no = By.ID, '%s:id/search_order_no' % basePage.package_name
search_order_sku = By.ID, '%s:id/search_order_sku' % basePage.package_name
search_order_org = By.ID, '%s:id/search_order_org' % basePage.package_name
type_edit = By.ID, '%s:id/type_edit' % basePage.package_name
transfer_options1 = By.ID, '%s:id/options1' % basePage.package_name
transfer_options_submit = By.ID, '%s:id/btnSubmit' % basePage.package_name
all_check = By.ID, '%s:id/all_check' % basePage.package_name
out_check = By.ID, '%s:id/out_check' % basePage.package_name
in_check = By.ID, '%s:id/in_check' % basePage.package_name
operate_edit = By.ID, '%s:id/operate_edit' % basePage.package_name
search_clear = By.ID, '%s:id/search_clear' % basePage.package_name
search_up_cancel = By.ID, '%s:id/search_up_cancel' % basePage.package_name
order_state = By.XPATH, "//android.widget.TextView[contains(@text,'已完成')]"
allocate_name = By.ID, '%s:id/allocate_name' % basePage.package_name
start_at = By.ID, '%s:id/start_at' % basePage.package_name
end_at = By.ID, '%s:id/end_at' % basePage.package_name
day = By.ID, '%s:id/day' % basePage.package_name
btn_view_diff = By.CLASS_NAME, 'btn-view-diff'
searchIcon = By.ID, 'searchIcon'
input_item = By.CLASS_NAME, 'input-item'
icon_delete = (By.XPATH,
"//div[@class='keyboard']/div[1]/img[@class='icon-delete']")
back_btn = By.XPATH, "//div[@class='icon-back']/img[@alt='<']"
btn_save = By.CLASS_NAME, 'btn-save'
add_handle = By.XPATH, "//div[@class='before-focus']/div[1]"
add_border_node = By.XPATH, "//div[@class='before-focus']/div[2]"
loggingimport = By.XPATH, "//div[@class='before-focus']/div[3]"
btn_more = By.CLASS_NAME, 'btn-more'
btn_close_native = By.CLASS_NAME, 'btn-close-native'
icon_edit = (By.XPATH,
"//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]"
)
div_num = random.randint(1, 9)
num_key = By.XPATH, "//div[@class='keyboard']/div[2]/div[%s]" % div_num
num_keys = By.XPATH, "//div[@class='keyboard']/div[2]"
key_confirm = By.XPATH, "//div[@class='keyboard']/div[2]/div[12]"
result_item = By.CLASS_NAME, 'result-item'
<|reserved_special_token_1|>
#!/usr/bin/python
# encoding:utf-8
from selenium.webdriver.common.by import By
import random
import basePage
# 门店入库button
stock_in = (By.XPATH, "//android.widget.TextView[contains(@text,'门店入库')]")
# 调拨入库button
transfer_in = (By.XPATH, "//android.widget.TextView[contains(@text,'调拨入库')]")
# 确认签收button
take_receive = (By.ID, '%s:id/take_receive'%basePage.package_name)
# 查看详情
details_text = (By.ID, '%s:id/details_text'%basePage.package_name)
# 调拨单代签收状态
transfer_confirm_state = (By.ID, "%s:id/state")
# 差异签收弹出框
transfer_diff_wizard = (By.ID, "%s:id/multiple_dialog_container")
# 确认签收按钮
text_confirm_button = (By.ID, "%s:id/text_confirm")
# 差异收货button
diff_confirm_button = (By.XPATH, "//android.widget.TextView[contains(@text,'差异收货')]")
# 订单状态
state_num = random.randint(1, 4)
order_of_state = (By.XPATH, "//android.widget.TextView[%s]" % state_num)
# 订单状态下拉
title = (By.ID, '%s:id/title'%basePage.package_name)
# 展开订单详情
fold_image = (By.ID, '%s:id/fold_image'%basePage.package_name)
# 高级搜索button
order_search = (By.ID, '%s:id/order_search'%basePage.package_name)
# 查询
search_query = (By.ID, '%s:id/search_query'%basePage.package_name)
# 调拨单号输入框
search_order_no = (By.ID, '%s:id/search_order_no'%basePage.package_name)
# 商品编码输入框
search_order_sku = (By.ID, '%s:id/search_order_sku'%basePage.package_name)
# 发货店仓输入框
search_order_org = (By.ID, '%s:id/search_order_org'%basePage.package_name)
# 调拨类型
type_edit = (By.ID, '%s:id/type_edit'%basePage.package_name)
# 调拨类型option
transfer_options1 = (By.ID, '%s:id/options1'%basePage.package_name)
transfer_options_submit = (By.ID, '%s:id/btnSubmit'%basePage.package_name)
# 日期范围
all_check = (By.ID, '%s:id/all_check'%basePage.package_name)
out_check = (By.ID, '%s:id/out_check'%basePage.package_name)
in_check = (By.ID, '%s:id/in_check'%basePage.package_name)
# 操作人输入框
operate_edit = (By.ID, '%s:id/operate_edit'%basePage.package_name)
# 重置
search_clear = (By.ID, '%s:id/search_clear'%basePage.package_name)
# 取消
search_up_cancel = (By.ID, '%s:id/search_up_cancel'%basePage.package_name)
# 调拨单状态
order_state = (By.XPATH, "//android.widget.TextView[contains(@text,'已完成')]")
# 调拨单号
allocate_name = (By.ID, '%s:id/allocate_name'%basePage.package_name)
# 高级搜索,选择开始日期
start_at = (By.ID, '%s:id/start_at'%basePage.package_name)
# 高级搜索,选择结束日期
end_at = (By.ID, '%s:id/end_at'%basePage.package_name)
# 高级搜索,选择日
day = (By.ID, '%s:id/day'%basePage.package_name)
# H5定位
# 只看差异
btn_view_diff = (By.CLASS_NAME, 'btn-view-diff')
# 搜索button
searchIcon = (By.ID, 'searchIcon')
# 搜索条件
input_item = (By.CLASS_NAME, 'input-item')
# 清空搜索内容
icon_delete = (By.XPATH, "//div[@class='keyboard']/div[1]/img[@class='icon-delete']")
# 返回
back_btn = (By.XPATH, "//div[@class='icon-back']/img[@alt='<']")
# 保存
btn_save = (By.CLASS_NAME, 'btn-save')
# 手工添加
add_handle = (By.XPATH, "//div[@class='before-focus']/div[1]")
# 扫码添加
add_border_node = (By.XPATH, "//div[@class='before-focus']/div[2]")
# 导入采集
loggingimport = (By.XPATH, "//div[@class='before-focus']/div[3]")
# 更多
btn_more = (By.CLASS_NAME, 'btn-more')
# 清空列表
btn_close_native = (By.CLASS_NAME, 'btn-close-native')
# 点击修改收货数量
icon_edit = (By.XPATH, "//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]")
# 填写收货数量
div_num = random.randint(1,9)
num_key = (By.XPATH, "//div[@class='keyboard']/div[2]/div[%s]"%div_num)
num_keys = (By.XPATH, "//div[@class='keyboard']/div[2]")
# 确认修改收货数量
key_confirm = (By.XPATH, "//div[@class='keyboard']/div[2]/div[12]")
# 订单内容
result_item = (By.CLASS_NAME, 'result-item')
|
flexible
|
{
"blob_id": "d1b025ddbf7d0ad48ff92a098d074820a3eb35ed",
"index": 6723,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nstock_in = By.XPATH, \"//android.widget.TextView[contains(@text,'门店入库')]\"\ntransfer_in = By.XPATH, \"//android.widget.TextView[contains(@text,'调拨入库')]\"\ntake_receive = By.ID, '%s:id/take_receive' % basePage.package_name\ndetails_text = By.ID, '%s:id/details_text' % basePage.package_name\ntransfer_confirm_state = By.ID, '%s:id/state'\ntransfer_diff_wizard = By.ID, '%s:id/multiple_dialog_container'\ntext_confirm_button = By.ID, '%s:id/text_confirm'\ndiff_confirm_button = (By.XPATH,\n \"//android.widget.TextView[contains(@text,'差异收货')]\")\nstate_num = random.randint(1, 4)\norder_of_state = By.XPATH, '//android.widget.TextView[%s]' % state_num\ntitle = By.ID, '%s:id/title' % basePage.package_name\nfold_image = By.ID, '%s:id/fold_image' % basePage.package_name\norder_search = By.ID, '%s:id/order_search' % basePage.package_name\nsearch_query = By.ID, '%s:id/search_query' % basePage.package_name\nsearch_order_no = By.ID, '%s:id/search_order_no' % basePage.package_name\nsearch_order_sku = By.ID, '%s:id/search_order_sku' % basePage.package_name\nsearch_order_org = By.ID, '%s:id/search_order_org' % basePage.package_name\ntype_edit = By.ID, '%s:id/type_edit' % basePage.package_name\ntransfer_options1 = By.ID, '%s:id/options1' % basePage.package_name\ntransfer_options_submit = By.ID, '%s:id/btnSubmit' % basePage.package_name\nall_check = By.ID, '%s:id/all_check' % basePage.package_name\nout_check = By.ID, '%s:id/out_check' % basePage.package_name\nin_check = By.ID, '%s:id/in_check' % basePage.package_name\noperate_edit = By.ID, '%s:id/operate_edit' % basePage.package_name\nsearch_clear = By.ID, '%s:id/search_clear' % basePage.package_name\nsearch_up_cancel = By.ID, '%s:id/search_up_cancel' % basePage.package_name\norder_state = By.XPATH, \"//android.widget.TextView[contains(@text,'已完成')]\"\nallocate_name = By.ID, '%s:id/allocate_name' % basePage.package_name\nstart_at = By.ID, '%s:id/start_at' % basePage.package_name\nend_at = By.ID, '%s:id/end_at' % basePage.package_name\nday = By.ID, '%s:id/day' % basePage.package_name\nbtn_view_diff = By.CLASS_NAME, 'btn-view-diff'\nsearchIcon = By.ID, 'searchIcon'\ninput_item = By.CLASS_NAME, 'input-item'\nicon_delete = (By.XPATH,\n \"//div[@class='keyboard']/div[1]/img[@class='icon-delete']\")\nback_btn = By.XPATH, \"//div[@class='icon-back']/img[@alt='<']\"\nbtn_save = By.CLASS_NAME, 'btn-save'\nadd_handle = By.XPATH, \"//div[@class='before-focus']/div[1]\"\nadd_border_node = By.XPATH, \"//div[@class='before-focus']/div[2]\"\nloggingimport = By.XPATH, \"//div[@class='before-focus']/div[3]\"\nbtn_more = By.CLASS_NAME, 'btn-more'\nbtn_close_native = By.CLASS_NAME, 'btn-close-native'\nicon_edit = (By.XPATH,\n \"//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]\"\n )\ndiv_num = random.randint(1, 9)\nnum_key = By.XPATH, \"//div[@class='keyboard']/div[2]/div[%s]\" % div_num\nnum_keys = By.XPATH, \"//div[@class='keyboard']/div[2]\"\nkey_confirm = By.XPATH, \"//div[@class='keyboard']/div[2]/div[12]\"\nresult_item = By.CLASS_NAME, 'result-item'\n",
"step-3": "from selenium.webdriver.common.by import By\nimport random\nimport basePage\nstock_in = By.XPATH, \"//android.widget.TextView[contains(@text,'门店入库')]\"\ntransfer_in = By.XPATH, \"//android.widget.TextView[contains(@text,'调拨入库')]\"\ntake_receive = By.ID, '%s:id/take_receive' % basePage.package_name\ndetails_text = By.ID, '%s:id/details_text' % basePage.package_name\ntransfer_confirm_state = By.ID, '%s:id/state'\ntransfer_diff_wizard = By.ID, '%s:id/multiple_dialog_container'\ntext_confirm_button = By.ID, '%s:id/text_confirm'\ndiff_confirm_button = (By.XPATH,\n \"//android.widget.TextView[contains(@text,'差异收货')]\")\nstate_num = random.randint(1, 4)\norder_of_state = By.XPATH, '//android.widget.TextView[%s]' % state_num\ntitle = By.ID, '%s:id/title' % basePage.package_name\nfold_image = By.ID, '%s:id/fold_image' % basePage.package_name\norder_search = By.ID, '%s:id/order_search' % basePage.package_name\nsearch_query = By.ID, '%s:id/search_query' % basePage.package_name\nsearch_order_no = By.ID, '%s:id/search_order_no' % basePage.package_name\nsearch_order_sku = By.ID, '%s:id/search_order_sku' % basePage.package_name\nsearch_order_org = By.ID, '%s:id/search_order_org' % basePage.package_name\ntype_edit = By.ID, '%s:id/type_edit' % basePage.package_name\ntransfer_options1 = By.ID, '%s:id/options1' % basePage.package_name\ntransfer_options_submit = By.ID, '%s:id/btnSubmit' % basePage.package_name\nall_check = By.ID, '%s:id/all_check' % basePage.package_name\nout_check = By.ID, '%s:id/out_check' % basePage.package_name\nin_check = By.ID, '%s:id/in_check' % basePage.package_name\noperate_edit = By.ID, '%s:id/operate_edit' % basePage.package_name\nsearch_clear = By.ID, '%s:id/search_clear' % basePage.package_name\nsearch_up_cancel = By.ID, '%s:id/search_up_cancel' % basePage.package_name\norder_state = By.XPATH, \"//android.widget.TextView[contains(@text,'已完成')]\"\nallocate_name = By.ID, '%s:id/allocate_name' % basePage.package_name\nstart_at = By.ID, '%s:id/start_at' % basePage.package_name\nend_at = By.ID, '%s:id/end_at' % basePage.package_name\nday = By.ID, '%s:id/day' % basePage.package_name\nbtn_view_diff = By.CLASS_NAME, 'btn-view-diff'\nsearchIcon = By.ID, 'searchIcon'\ninput_item = By.CLASS_NAME, 'input-item'\nicon_delete = (By.XPATH,\n \"//div[@class='keyboard']/div[1]/img[@class='icon-delete']\")\nback_btn = By.XPATH, \"//div[@class='icon-back']/img[@alt='<']\"\nbtn_save = By.CLASS_NAME, 'btn-save'\nadd_handle = By.XPATH, \"//div[@class='before-focus']/div[1]\"\nadd_border_node = By.XPATH, \"//div[@class='before-focus']/div[2]\"\nloggingimport = By.XPATH, \"//div[@class='before-focus']/div[3]\"\nbtn_more = By.CLASS_NAME, 'btn-more'\nbtn_close_native = By.CLASS_NAME, 'btn-close-native'\nicon_edit = (By.XPATH,\n \"//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]\"\n )\ndiv_num = random.randint(1, 9)\nnum_key = By.XPATH, \"//div[@class='keyboard']/div[2]/div[%s]\" % div_num\nnum_keys = By.XPATH, \"//div[@class='keyboard']/div[2]\"\nkey_confirm = By.XPATH, \"//div[@class='keyboard']/div[2]/div[12]\"\nresult_item = By.CLASS_NAME, 'result-item'\n",
"step-4": "#!/usr/bin/python\n# encoding:utf-8\nfrom selenium.webdriver.common.by import By\nimport random\nimport basePage\n\n# 门店入库button\nstock_in = (By.XPATH, \"//android.widget.TextView[contains(@text,'门店入库')]\")\n# 调拨入库button\ntransfer_in = (By.XPATH, \"//android.widget.TextView[contains(@text,'调拨入库')]\")\n# 确认签收button\ntake_receive = (By.ID, '%s:id/take_receive'%basePage.package_name)\n# 查看详情\ndetails_text = (By.ID, '%s:id/details_text'%basePage.package_name)\n\n# 调拨单代签收状态\ntransfer_confirm_state = (By.ID, \"%s:id/state\")\n\n# 差异签收弹出框\ntransfer_diff_wizard = (By.ID, \"%s:id/multiple_dialog_container\")\n# 确认签收按钮\ntext_confirm_button = (By.ID, \"%s:id/text_confirm\")\n# 差异收货button\ndiff_confirm_button = (By.XPATH, \"//android.widget.TextView[contains(@text,'差异收货')]\")\n\n# 订单状态\nstate_num = random.randint(1, 4)\norder_of_state = (By.XPATH, \"//android.widget.TextView[%s]\" % state_num)\n# 订单状态下拉\ntitle = (By.ID, '%s:id/title'%basePage.package_name)\n# 展开订单详情\nfold_image = (By.ID, '%s:id/fold_image'%basePage.package_name)\n\n# 高级搜索button\norder_search = (By.ID, '%s:id/order_search'%basePage.package_name)\n# 查询\nsearch_query = (By.ID, '%s:id/search_query'%basePage.package_name)\n# 调拨单号输入框\nsearch_order_no = (By.ID, '%s:id/search_order_no'%basePage.package_name)\n# 商品编码输入框\nsearch_order_sku = (By.ID, '%s:id/search_order_sku'%basePage.package_name)\n# 发货店仓输入框\nsearch_order_org = (By.ID, '%s:id/search_order_org'%basePage.package_name)\n# 调拨类型\ntype_edit = (By.ID, '%s:id/type_edit'%basePage.package_name)\n# 调拨类型option\ntransfer_options1 = (By.ID, '%s:id/options1'%basePage.package_name)\ntransfer_options_submit = (By.ID, '%s:id/btnSubmit'%basePage.package_name)\n\n# 日期范围\nall_check = (By.ID, '%s:id/all_check'%basePage.package_name)\nout_check = (By.ID, '%s:id/out_check'%basePage.package_name)\nin_check = (By.ID, '%s:id/in_check'%basePage.package_name)\n# 操作人输入框\noperate_edit = (By.ID, '%s:id/operate_edit'%basePage.package_name)\n# 重置\nsearch_clear = (By.ID, '%s:id/search_clear'%basePage.package_name)\n# 取消\nsearch_up_cancel = (By.ID, '%s:id/search_up_cancel'%basePage.package_name)\n# 调拨单状态\norder_state = (By.XPATH, \"//android.widget.TextView[contains(@text,'已完成')]\")\n# 调拨单号\nallocate_name = (By.ID, '%s:id/allocate_name'%basePage.package_name)\n# 高级搜索,选择开始日期\nstart_at = (By.ID, '%s:id/start_at'%basePage.package_name)\n# 高级搜索,选择结束日期\nend_at = (By.ID, '%s:id/end_at'%basePage.package_name)\n# 高级搜索,选择日\nday = (By.ID, '%s:id/day'%basePage.package_name)\n\n# H5定位\n# 只看差异\nbtn_view_diff = (By.CLASS_NAME, 'btn-view-diff')\n# 搜索button\nsearchIcon = (By.ID, 'searchIcon')\n# 搜索条件\ninput_item = (By.CLASS_NAME, 'input-item')\n# 清空搜索内容\nicon_delete = (By.XPATH, \"//div[@class='keyboard']/div[1]/img[@class='icon-delete']\")\n# 返回\nback_btn = (By.XPATH, \"//div[@class='icon-back']/img[@alt='<']\")\n# 保存\nbtn_save = (By.CLASS_NAME, 'btn-save')\n# 手工添加\nadd_handle = (By.XPATH, \"//div[@class='before-focus']/div[1]\")\n# 扫码添加\nadd_border_node = (By.XPATH, \"//div[@class='before-focus']/div[2]\")\n# 导入采集\nloggingimport = (By.XPATH, \"//div[@class='before-focus']/div[3]\")\n# 更多\nbtn_more = (By.CLASS_NAME, 'btn-more')\n# 清空列表\nbtn_close_native = (By.CLASS_NAME, 'btn-close-native')\n# 点击修改收货数量\nicon_edit = (By.XPATH, \"//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]\")\n# 填写收货数量\ndiv_num = random.randint(1,9)\nnum_key = (By.XPATH, \"//div[@class='keyboard']/div[2]/div[%s]\"%div_num)\nnum_keys = (By.XPATH, \"//div[@class='keyboard']/div[2]\")\n# 确认修改收货数量\nkey_confirm = (By.XPATH, 
\"//div[@class='keyboard']/div[2]/div[12]\")\n# 订单内容\nresult_item = (By.CLASS_NAME, 'result-item')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 31 14:35:49 2019
@author: devinpowers
"""
# Lab 1 in CSE 231
#Quadratic Formula
# Find the roots in the Quadratic Formula
import math
a = float(input("Enter the coeddicient a: "))
b = float(input("Enter the coeddicient b: "))
c = float(input("Enter the coeddicient c: "))
print (" Coefficients:")
print( " Coefficient of a = ", a)
print( " Coefficient of b = ", b)
print( " Coefficient of c = ", c)
root_1 = (-b+(b**2-4*a*c)**(0.5))/(2*a)
root_2 = (-b-(b**2-4*a*c)**(0.5))/(2*a)
print("The roots of the equation:")
print( " Root 1 =", root_1)
print( " Root 2 =", root_2)
|
normal
|
{
"blob_id": "2acfd0bbad68bb9d55aeb39b180f4326a225f6d5",
"index": 1218,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(' Coefficients:')\nprint(' Coefficient of a = ', a)\nprint(' Coefficient of b = ', b)\nprint(' Coefficient of c = ', c)\n<mask token>\nprint('The roots of the equation:')\nprint(' Root 1 =', root_1)\nprint(' Root 2 =', root_2)\n",
"step-3": "<mask token>\na = float(input('Enter the coeddicient a: '))\nb = float(input('Enter the coeddicient b: '))\nc = float(input('Enter the coeddicient c: '))\nprint(' Coefficients:')\nprint(' Coefficient of a = ', a)\nprint(' Coefficient of b = ', b)\nprint(' Coefficient of c = ', c)\nroot_1 = (-b + (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)\nroot_2 = (-b - (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)\nprint('The roots of the equation:')\nprint(' Root 1 =', root_1)\nprint(' Root 2 =', root_2)\n",
"step-4": "<mask token>\nimport math\na = float(input('Enter the coeddicient a: '))\nb = float(input('Enter the coeddicient b: '))\nc = float(input('Enter the coeddicient c: '))\nprint(' Coefficients:')\nprint(' Coefficient of a = ', a)\nprint(' Coefficient of b = ', b)\nprint(' Coefficient of c = ', c)\nroot_1 = (-b + (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)\nroot_2 = (-b - (b ** 2 - 4 * a * c) ** 0.5) / (2 * a)\nprint('The roots of the equation:')\nprint(' Root 1 =', root_1)\nprint(' Root 2 =', root_2)\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 31 14:35:49 2019\n\n@author: devinpowers\n\"\"\"\n\n# Lab 1 in CSE 231\n#Quadratic Formula\n# Find the roots in the Quadratic Formula\n \nimport math\n\na = float(input(\"Enter the coeddicient a: \"))\nb = float(input(\"Enter the coeddicient b: \"))\nc = float(input(\"Enter the coeddicient c: \"))\n\nprint (\" Coefficients:\")\nprint( \" Coefficient of a = \", a)\nprint( \" Coefficient of b = \", b)\nprint( \" Coefficient of c = \", c)\n\nroot_1 = (-b+(b**2-4*a*c)**(0.5))/(2*a)\nroot_2 = (-b-(b**2-4*a*c)**(0.5))/(2*a)\n\nprint(\"The roots of the equation:\")\nprint( \" Root 1 =\", root_1)\nprint( \" Root 2 =\", root_2)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def ortoolsSolverRange(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]
play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
total_fun = model.NewIntVar(-100, 1000, 'total_fun')
model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(play[i] >= 1)
model.Add(play[i] <= token[i])
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
<|reserved_special_token_0|>
def ortoolsSolverComb(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]
play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]
total_fun = sum([(fun[i] * play[i]) for i in range(num)])
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
model.Add(play[i] <= token[i])
model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def ortoolsSolverReduceVar(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in
range(1, num + 1)]
play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in
range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
total_fun = sum([(fun[i] * play[i]) for i in range(num)])
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(play[i] >= 1)
model.Add(play[i] <= token[i])
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
def ortoolsSolverRange(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]
play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
total_fun = model.NewIntVar(-100, 1000, 'total_fun')
model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(play[i] >= 1)
model.Add(play[i] <= token[i])
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
def ortoolsSolverNeg(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in
range(1, num + 1)]
play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in
range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]
total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')
model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
model.Add(play[i] <= token[i])
model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
def ortoolsSolverComb(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]
play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]
total_fun = sum([(fun[i] * play[i]) for i in range(num)])
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
model.Add(play[i] <= token[i])
model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def ortoolsSolverReduceVar(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in
range(1, num + 1)]
play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in
range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
total_fun = sum([(fun[i] * play[i]) for i in range(num)])
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(play[i] >= 1)
model.Add(play[i] <= token[i])
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
def ortoolsSolverRange(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]
play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
total_fun = model.NewIntVar(-100, 1000, 'total_fun')
model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(play[i] >= 1)
model.Add(play[i] <= token[i])
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
def ortoolsSolverNeg(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(-2147483648, 2147483647, 't%i' % i) for i in
range(1, num + 1)]
play = [model.NewIntVar(-2147483648, 2147483647, 'q%i' % i) for i in
range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]
total_fun = model.NewIntVar(-2147483648, 2147483647, 'total_fun')
model.Add(total_fun == sum([(fun[i] * play[i]) for i in range(num)]))
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
model.Add(play[i] <= token[i])
model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
def ortoolsSolverComb(num, cap, refill, fun, goal):
model = cp_model.CpModel()
token = [model.NewIntVar(1, cap, 't%i' % i) for i in range(1, num + 1)]
play = [model.NewIntVar(1, cap, 'q%i' % i) for i in range(1, num + 1)]
compare = [model.NewBoolVar('c%i' % i) for i in range(1, num + 1)]
neg = [model.NewBoolVar('n%i' % i) for i in range(1, num + 1)]
total_fun = sum([(fun[i] * play[i]) for i in range(num)])
model.Add(total_fun >= goal)
model.Add(token[0] == cap)
for i in range(num):
model.Add(token[i] - play[i] + refill > cap).OnlyEnforceIf(compare[i])
model.Add(token[i] - play[i] + refill <= cap).OnlyEnforceIf(compare
[i].Not())
model.Add(fun[i] < 0).OnlyEnforceIf(neg[i])
model.Add(fun[i] >= 0).OnlyEnforceIf(neg[i].Not())
model.Add(play[i] <= token[i])
model.Add(play[i] == 1).OnlyEnforceIf(neg[i])
model.Add(play[i] >= 1).OnlyEnforceIf(neg[i].Not())
for i in range(1, num):
model.Add(token[i] == cap).OnlyEnforceIf(compare[i - 1])
model.Add(token[i] == token[i - 1] - play[i - 1] + refill
).OnlyEnforceIf(compare[i - 1].Not())
model.Maximize(total_fun)
solver = cp_model.CpSolver()
status = solver.Solve(model)
sat = solver.StatusName()
time = solver.UserTime()
if status == cp_model.INFEASIBLE:
token = None
play = None
total_fun = None
else:
token = [solver.Value(token[i]) for i in range(num)]
play = [solver.Value(play[i]) for i in range(num)]
total_fun = solver.Value(total_fun)
return [sat, token, play, total_fun, time]
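# ortoolsSolverComb combines the tight [1, cap] domains of ortoolsSolverRange
# with the negative-fun handling of ortoolsSolverNeg, and keeps total_fun as a
# plain linear expression rather than an auxiliary IntVar.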
if __name__ == '__main__':
file = sys.argv[1]
f = open(file)
for i in range(5):
exec(f.readline())
f.close()
[sat, token, play, total_fun, time] = ortoolsSolverComb(num, cap,
refill, fun, goal)
print('Status:', sat)
if sat == 'OPTIMAL':
print('Maximum total fun:', total_fun)
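# Minimal usage sketch (hypothetical input file). The script exec's the first
# five lines of the file named on the command line, so that file is expected to
# define num, cap, refill, fun and goal, e.g. a file `toy_instance.txt` with:
#
# num = 3
# cap = 5
# refill = 2
# fun = [4, -1, 3]
# goal = 10
#
# run as: python this_script.py toy_instance.txt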
# --- Separate module: Dialogflow webhook helpers for the MDining menu API ---
# Imports below are the ones the functions in this module rely on (Stackdriver
# logging, Datastore access, and HTTP requests to the menu API).
import requests
import google.cloud.logging
from google.cloud import datastore
def report_error(error_text):
"""Logs error to Stackdriver.
:param error_text: The text to log to Stackdriver
:type error_text: string
"""
client = google.cloud.logging.Client()
logger = client.logger('automated_error_catch')
logger.log_text(error_text)
def get_secrets():
"""Fetches secrets from Datastore and returns them as a list.
"""
client = datastore.Client()
query = client.query(kind='env_vars')
entity = query.fetch()
secrets = list(entity)[0]
return secrets
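# Note: get_secrets assumes a Datastore kind named 'env_vars' whose first
# entity holds the module's configuration values (for example the MDining API
# base URL stored under the key 'm_dining_api_main').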
def format_requisites(text, requisites):
"""If any item requisites specified, adds them to response text data for more holistic response.
:param text: The response text data to be formatted
:type text: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits_text = ''
allergens_text = ''
req_map = {'trait': {'mhealthy': 'healthy'}, 'allergens': {
'sesame-seed': 'sesame seeds', 'tree-nuts': 'tree nuts',
'wheat_barley_rye': 'wheat or barley or rye'}}
for i, trait in enumerate(requisites['trait']):
if traits_text:
traits_text += ', '
traits_text += req_map['trait'].get(trait, trait)
traits_text = format_plural(traits_text.rstrip(', '))
for i, allergen in enumerate(requisites['allergens']):
if allergens_text:
allergens_text += ', '
allergens_text += req_map['allergens'].get(allergen, allergen)
allergens_text = format_plural(allergens_text.rstrip(', '))
allergens_text = allergens_text.replace('and', 'or')
if allergens_text:
allergens_text = ' without ' + allergens_text
if traits_text:
traits_text = ' that is ' + traits_text
if (allergens_text or traits_text
) and 'Sorry, that is not available' in text:
traits_text = traits_text.replace(' that is ', '')
text = text.replace('Sorry, ', 'Sorry, ' + traits_text + ' ')
text = text.replace('that is not available', '[meal]')
return text + allergens_text + ' is not available'
else:
return text + traits_text + allergens_text
def format_plural(text):
"""Adds 'and' before last item in list of items.
:param text: The string to be manipulated
:type text: string
"""
if ',' in text:
index = text.rfind(',') + 2
text = text[:index] + 'and ' + text[index:]
return text
def remove_spaces(url_block):
"""Removes spaces in url string to create valid url string.
:param url_block: The url string to be manipulated
:type search: string
"""
temp = ''
for i in range(len(url_block)):
if url_block[i] == ' ':
temp += '+'
else:
temp += url_block[i]
return temp
def check_meal_available(data, meal):
"""Searches response data to check if meal is available at specified location/date.
:param data: MDining API HTTP response data
:type data: dict
:param meal: Name of meal
:type meal: string
"""
for key in data['menu']['meal']:
if data['menu']['meal']['name'].upper() == meal.upper():
if 'course' in data['menu']['meal']:
return True
return False
return False
def check_course_available(data, course):
"""Searches response data to check if course is available in specified meal.
:param data: MDining API HTTP response data
:type data: dict
:param course: Name of course
:type course: string
"""
for i in range(len(data['menu']['meal']['course'])):
for key, value in data['menu']['meal']['course'][i].items():
if key == 'name':
if value.upper() == course.upper():
return True
return False
def check_item_specifications(item, traits, allergens):
"""Returns true if food item is satisfactory with specified traits and allergens.
:param item: Data of specific food item
:type item: dict
:param traits: List of specified traits item must have, can be empty
:type traits: list
:param allergens: List of allergens item cannot have, can be empty
:type allergens: list
"""
if allergens and 'allergens' in item:
for allergen in allergens:
if allergen in item['allergens']:
return False
if not traits:
return True
if 'trait' in item:
for trait in traits:
if trait not in item['trait']:
return False
return True
else:
return False
def get_items(data, requisites, formatted):
"""Returns string of food items of each course in response data for
fulfillmentText in response to Dialogflow.
:param data: MDining API HTTP response data
:type data: dict
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
:param formatted: True/False - formats response string if true
:type formatted: boolean
"""
returndata = ''
traits = requisites['trait']
allergens = requisites['allergens']
if formatted:
prefix = '\t'
suffix = '\n'
else:
prefix = ''
suffix = ', '
for course in data['menu']['meal']['course']:
item_data = []
datatype = type(course['menuitem'])
if datatype is list:
item_data += course['menuitem']
else:
item_data.append(course['menuitem'])
for item in item_data:
if check_item_specifications(item, traits, allergens
) and 'No Service at this Time' not in item['name']:
returndata += prefix + item['name'].rstrip(', ') + suffix
return returndata
def find_item_formatting(possible_matches):
"""Formatting list of possible matches into more natural sentence structure
by removing redundancy:
[Chicken during lunch, chicken wings during lunch, and chicken patty during dinner] ->
[Chicken, chicken wings during lunch, and chicken patty during dinner]
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
"""
for i in range(len(possible_matches)):
if i == 0:
continue
words = possible_matches[i].split()
if possible_matches[i].split()[-1] == possible_matches[i - 1].split()[
-1]:
length = len(possible_matches[i].split()[-1]) + 8
possible_matches[i - 1] = possible_matches[i - 1][:length * -1]
return possible_matches
def find_matches(course_data, possible_matches, item_in, meal_name, requisites
):
"""Appends matches of specified food item in data of an individual course to
list of possible matches.
:param course_data: Chosen course subsection of MDining API HTTP response data
:type course_data: dict
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
:param item_in: User input food item
:type item_in: string
:param meal_name: Name of meal
:type meal_name: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits = requisites['trait']
allergens = requisites['allergens']
item_data = []
datatype = type(course_data)
if datatype is list:
item_data += course_data
else:
item_data.append(course_data)
for item in item_data:
if check_item_specifications(item, traits, allergens) == False:
continue
if item_in.upper() in item['name'].upper():
if item['name'][-1] == ' ':
item['name'] = item['name'][:-1]
possible_matches.append(item['name'] + ' during ' + meal_name)
return possible_matches
def request_location_and_meal(date_in, loc_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and meal entities from ``findLocationAndMeal`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param meal_in: Input meal
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
url = (
'http://api.studentlife.umich.edu/menu/xml2print.php?controller=&view=json'
)
location = '&location='
date = '&date='
meal = '&meal='
location += loc_in
meal += meal_in
date += str(date_in)
url = url + location + date + meal
url = remove_spaces(url)
data = requests.get(url).json()
if check_meal_available(data, meal_in):
returnstring = get_items(data, requisites, False).rstrip(', ')
return format_plural(returnstring)
else:
return 'No meal is available'
def request_item(date_in, loc_in, item_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and food item entities (and meal entity if included) from ``findItem`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param item_in: Input food item
:type item_in: string
:param meal_in: Input meal, can be empty string if not specified
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
secrets = get_secrets()
url = secrets.get('m_dining_api_main')
location = '&location='
date = '&date='
meal = '&meal='
location += loc_in
date += str(date_in)
url = url + location + date + meal
url = remove_spaces(url)
if meal_in == '':
meal_entered = False
else:
meal_entered = True
data = requests.get(url).json()
possible_matches = []
for i in data['menu']['meal']:
if meal_entered and i['name'].upper() != meal_in.upper():
continue
if 'course' not in i:
continue
for j in i['course']:
for key, value in j.items():
if key == 'name':
course_data = j['menuitem']
meal_name = i['name']
possible_matches = find_matches(course_data,
possible_matches, item_in, meal_name, requisites)
if possible_matches:
possible_matches = find_item_formatting(possible_matches)
text = 'Yes, there is '
for i in range(len(possible_matches)):
if len(possible_matches) > 1 and i == len(possible_matches) - 1:
text += ' and'
text += ' ' + possible_matches[i]
if i != len(possible_matches) - 1:
text += ','
else:
text = 'Sorry, that is not available'
return {'fulfillmentText': text}
<|reserved_special_token_1|>
import requests
from google.cloud import datastore
import google.cloud.logging
def report_error(error_text):
"""Logs error to Stackdriver.
:param error_text: The text to log to Stackdriver
:type error_text: string
"""
client = google.cloud.logging.Client()
logger = client.logger('automated_error_catch')
logger.log_text(error_text)
def get_secrets():
"""Fetches secrets from Datastore and returns them as a list.
"""
client = datastore.Client()
query = client.query(kind='env_vars')
entity = query.fetch()
secrets = list(entity)[0]
return secrets
def format_requisites(text, requisites):
"""If any item requisites specified, adds them to response text data for more holistic response.
:param text: The response text data to be formatted
:type text: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits_text = ''
allergens_text = ''
req_map = {'trait': {'mhealthy': 'healthy'}, 'allergens': {
'sesame-seed': 'sesame seeds', 'tree-nuts': 'tree nuts',
'wheat_barley_rye': 'wheat or barley or rye'}}
for i, trait in enumerate(requisites['trait']):
if traits_text:
traits_text += ', '
traits_text += req_map['trait'].get(trait, trait)
traits_text = format_plural(traits_text.rstrip(', '))
for i, allergen in enumerate(requisites['allergens']):
if allergens_text:
allergens_text += ', '
allergens_text += req_map['allergens'].get(allergen, allergen)
allergens_text = format_plural(allergens_text.rstrip(', '))
allergens_text = allergens_text.replace('and', 'or')
if allergens_text:
allergens_text = ' without ' + allergens_text
if traits_text:
traits_text = ' that is ' + traits_text
if (allergens_text or traits_text
) and 'Sorry, that is not available' in text:
traits_text = traits_text.replace(' that is ', '')
text = text.replace('Sorry, ', 'Sorry, ' + traits_text + ' ')
text = text.replace('that is not available', '[meal]')
return text + allergens_text + ' is not available'
else:
return text + traits_text + allergens_text
def format_plural(text):
"""Adds 'and' before last item in list of items.
:param text: The string to be manipulated
:type text: string
"""
if ',' in text:
index = text.rfind(',') + 2
text = text[:index] + 'and ' + text[index:]
return text
def remove_spaces(url_block):
"""Removes spaces in url string to create valid url string.
:param url_block: The url string to be manipulated
:type search: string
"""
temp = ''
for i in range(len(url_block)):
if url_block[i] == ' ':
temp += '+'
else:
temp += url_block[i]
return temp
def check_meal_available(data, meal):
"""Searches response data to check if meal is available at specified location/date.
:param data: MDining API HTTP response data
:type data: dict
:param meal: Name of meal
:type meal: string
"""
for key in data['menu']['meal']:
if data['menu']['meal']['name'].upper() == meal.upper():
if 'course' in data['menu']['meal']:
return True
return False
return False
def check_course_available(data, course):
"""Searches response data to check if course is available in specified meal.
:param data: MDining API HTTP response data
:type data: dict
:param course: Name of course
:type course: string
"""
for i in range(len(data['menu']['meal']['course'])):
for key, value in data['menu']['meal']['course'][i].items():
if key == 'name':
if value.upper() == course.upper():
return True
return False
def check_item_specifications(item, traits, allergens):
"""Returns true if food item is satisfactory with specified traits and allergens.
:param item: Data of specific food item
:type item: dict
:param traits: List of specified traits item must have, can be empty
:type traits: list
:param allergens: List of allergens item cannot have, can be empty
:type allergens: list
"""
if allergens and 'allergens' in item:
for allergen in allergens:
if allergen in item['allergens']:
return False
if not traits:
return True
if 'trait' in item:
for trait in traits:
if trait not in item['trait']:
return False
return True
else:
return False
def get_items(data, requisites, formatted):
"""Returns string of food items of each course in response data for
fulfillmentText in response to Dialogflow.
:param data: MDining API HTTP response data
:type data: dict
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
:param formatted: True/False - formats response string if true
:type formatted: boolean
"""
returndata = ''
traits = requisites['trait']
allergens = requisites['allergens']
if formatted:
prefix = '\t'
suffix = '\n'
else:
prefix = ''
suffix = ', '
for course in data['menu']['meal']['course']:
item_data = []
datatype = type(course['menuitem'])
if datatype is list:
item_data += course['menuitem']
else:
item_data.append(course['menuitem'])
for item in item_data:
if check_item_specifications(item, traits, allergens
) and 'No Service at this Time' not in item['name']:
returndata += prefix + item['name'].rstrip(', ') + suffix
return returndata
def find_item_formatting(possible_matches):
"""Formatting list of possible matches into more natural sentence structure
by removing redundancy:
[Chicken during lunch, chicken wings during lunch, and chicken patty during dinner] ->
[Chicken, chicken wings during lunch, and chicken patty during dinner]
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
"""
for i in range(len(possible_matches)):
if i == 0:
continue
words = possible_matches[i].split()
if possible_matches[i].split()[-1] == possible_matches[i - 1].split()[
-1]:
length = len(possible_matches[i].split()[-1]) + 8
possible_matches[i - 1] = possible_matches[i - 1][:length * -1]
return possible_matches
def find_matches(course_data, possible_matches, item_in, meal_name, requisites
):
"""Appends matches of specified food item in data of an individual course to
list of possible matches.
:param course_data: Chosen course subsection of MDining API HTTP response data
:type course_data: dict
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
:param item_in: User input food item
:type item_in: string
:param meal_name: Name of meal
:type meal_name: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits = requisites['trait']
allergens = requisites['allergens']
item_data = []
datatype = type(course_data)
if datatype is list:
item_data += course_data
else:
item_data.append(course_data)
for item in item_data:
if check_item_specifications(item, traits, allergens) == False:
continue
if item_in.upper() in item['name'].upper():
if item['name'][-1] == ' ':
item['name'] = item['name'][:-1]
possible_matches.append(item['name'] + ' during ' + meal_name)
return possible_matches
def request_location_and_meal(date_in, loc_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and meal entities from ``findLocationAndMeal`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param meal_in: Input meal
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
url = (
'http://api.studentlife.umich.edu/menu/xml2print.php?controller=&view=json'
)
location = '&location='
date = '&date='
meal = '&meal='
location += loc_in
meal += meal_in
date += str(date_in)
url = url + location + date + meal
url = remove_spaces(url)
data = requests.get(url).json()
if check_meal_available(data, meal_in):
returnstring = get_items(data, requisites, False).rstrip(', ')
return format_plural(returnstring)
else:
return 'No meal is available'
def request_item(date_in, loc_in, item_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and food item entities (and meal entity if included) from ``findItem`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param item_in: Input food item
:type item_in: string
:param meal_in: Input meal, can be empty string if not specified
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
secrets = get_secrets()
url = secrets.get('m_dining_api_main')
location = '&location='
date = '&date='
meal = '&meal='
location += loc_in
date += str(date_in)
url = url + location + date + meal
url = remove_spaces(url)
if meal_in == '':
meal_entered = False
else:
meal_entered = True
data = requests.get(url).json()
possible_matches = []
for i in data['menu']['meal']:
if meal_entered and i['name'].upper() != meal_in.upper():
continue
if 'course' not in i:
continue
for j in i['course']:
for key, value in j.items():
if key == 'name':
course_data = j['menuitem']
meal_name = i['name']
possible_matches = find_matches(course_data,
possible_matches, item_in, meal_name, requisites)
if possible_matches:
possible_matches = find_item_formatting(possible_matches)
text = 'Yes, there is '
for i in range(len(possible_matches)):
if len(possible_matches) > 1 and i == len(possible_matches) - 1:
text += ' and'
text += ' ' + possible_matches[i]
if i != len(possible_matches) - 1:
text += ','
else:
text = 'Sorry, that is not available'
return {'fulfillmentText': text}
<|reserved_special_token_1|>
import requests
from google.cloud import datastore
import google.cloud.logging
###Helper functions
def report_error(error_text):
"""Logs error to Stackdriver.
:param error_text: The text to log to Stackdriver
:type error_text: string
"""
client = google.cloud.logging.Client()
logger = client.logger("automated_error_catch")
logger.log_text(error_text)
def get_secrets():
"""Fetches secrets from Datastore and returns them as a list.
"""
client = datastore.Client()
query = client.query(kind='env_vars')
entity = query.fetch()
secrets = list(entity)[0]
return secrets
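#Illustrative usage (hypothetical call site; 'm_dining_api_main' is the key that
#request_item reads further below):
#   secrets = get_secrets()
#   api_base = secrets.get('m_dining_api_main')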
def format_requisites(text, requisites):
"""If any item requisites specified, adds them to response text data for more holistic response.
:param text: The response text data to be formatted
:type text: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits_text = ''
allergens_text = ''
req_map = {'trait': {'mhealthy': 'healthy'},
'allergens': {'sesame-seed': 'sesame seeds',
'tree-nuts': 'tree nuts',
'wheat_barley_rye': 'wheat or barley or rye'}}
#If traits specified, extract into a string
for i, trait in enumerate(requisites['trait']):
if traits_text:
traits_text += ', '
traits_text += req_map['trait'].get(trait, trait)
traits_text = format_plural(traits_text.rstrip(', '))
#If allergens specified, extract into a string
for i, allergen in enumerate(requisites['allergens']):
if allergens_text:
allergens_text += ', '
allergens_text += req_map['allergens'].get(allergen, allergen)
allergens_text = format_plural(allergens_text.rstrip(', '))
allergens_text = allergens_text.replace('and', 'or')
#Requisite-specific language
if allergens_text:
allergens_text = ' without ' + allergens_text
if traits_text:
traits_text = ' that is ' + traits_text
#Return combined string
if (allergens_text or traits_text) and 'Sorry, that is not available' in text:
traits_text = traits_text.replace(' that is ', '')
text = text.replace('Sorry, ', 'Sorry, ' + traits_text + ' ')
text = text.replace('that is not available', '[meal]')
return text + allergens_text + ' is not available'
else:
return text + traits_text + allergens_text
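#Illustrative examples (hypothetical inputs):
#   format_requisites('Sorry, that is not available',
#                     {'trait': ['mhealthy'], 'allergens': ['sesame-seed']})
#   -> 'Sorry, healthy [meal] without sesame seeds is not available'
#   format_requisites('French Toast and Bacon', {'trait': ['vegan'], 'allergens': []})
#   -> 'French Toast and Bacon that is vegan'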
def format_plural(text):
"""Adds 'and' before last item in list of items.
:param text: The string to be manipulated
:type text: string
"""
if ',' in text:
index = text.rfind(',') + 2
text = text[:index] + 'and ' + text[index:]
return text
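#Illustrative example:
#   format_plural('eggs, toast, coffee') -> 'eggs, toast, and coffee'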
def remove_spaces(url_block):
"""Removes spaces in url string to create valid url string.
:param url_block: The url string to be manipulated
:type search: string
"""
temp = ""
for i in range(len(url_block)):
if url_block[i] == ' ':
temp += '+'
else:
temp += url_block[i]
return temp
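#Illustrative example (hypothetical query fragment):
#   remove_spaces('&location=East Quad&meal=Lunch') -> '&location=East+Quad&meal=Lunch'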
def check_meal_available(data, meal):
"""Searches response data to check if meal is available at specified location/date.
:param data: MDining API HTTP response data
:type data: dict
:param meal: Name of meal
:type meal: string
"""
for key in data['menu']['meal']:
if data['menu']['meal']['name'].upper() == meal.upper():
if 'course' in data['menu']['meal']:
return True
return False
return False
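#Note: this helper assumes data['menu']['meal'] is a single meal object, since the
#request URL already pins the meal. A minimal (hypothetical) shape that returns True:
#   {'menu': {'meal': {'name': 'Lunch', 'course': [...]}}} with meal='lunch'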
def check_course_available(data, course):
"""Searches response data to check if course is available in specified meal.
:param data: MDining API HTTP response data
:type data: dict
:param course: Name of course
:type course: string
"""
for i in range(len(data['menu']['meal']['course'])):
for key, value in data['menu']['meal']['course'][i].items():
if key == 'name':
if value.upper() == course.upper():
return True
return False
def check_item_specifications(item, traits, allergens):
"""Returns true if food item is satisfactory with specified traits and allergens.
:param item: Data of specific food item
:type item: dict
:param traits: List of specified traits item must have, can be empty
:type traits: list
:param allergens: List of allergens item cannot have, can be empty
:type allergens: list
"""
#Return false if allergens list isn't empty and any allergens found
if allergens and 'allergens' in item:
for allergen in allergens:
if allergen in item['allergens']:
return False
#Return true if traits list empty
if not traits:
return True
#Return false if traits list isn't empty and any traits are missing
if 'trait' in item:
for trait in traits:
if trait not in item['trait']:
return False
#All traits found, return true
return True
else:
return False
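#Illustrative example (hypothetical item data):
#   item = {'name': 'Oatmeal', 'trait': ['mhealthy'], 'allergens': ['milk']}
#   check_item_specifications(item, ['mhealthy'], [])       -> True
#   check_item_specifications(item, ['mhealthy'], ['milk']) -> False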
def get_items(data, requisites, formatted):
"""Returns string of food items of each course in response data for
fulfillmentText in response to Dialogflow.
:param data: MDining API HTTP response data
:type data: dict
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
:param formatted: True/False - formats response string if true
:type formatted: boolean
"""
returndata = ""
traits = requisites['trait']
allergens = requisites['allergens']
if formatted:
prefix = '\t'
suffix = '\n'
else:
prefix = ''
suffix = ', '
for course in data['menu']['meal']['course']:
item_data = []
datatype = type(course['menuitem'])
if datatype is list:
item_data += course['menuitem']
else:
item_data.append(course['menuitem'])
for item in item_data:
if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:
returndata += (prefix + (item['name']).rstrip(', ') + suffix)
return returndata
def find_item_formatting(possible_matches):
"""Formatting list of possible matches into more natural sentence structure
by removing redundancy:
[Chicken during lunch, chicken wings during lunch, and chicken patty during dinner] ->
[Chicken, chicken wings during lunch, and chicken patty during dinner]
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
"""
for i in range(len(possible_matches)):
if i == 0:
continue
words = possible_matches[i].split()
#If previous term has same ending ("Dinner") as current term, remove it
        if words[-1] == possible_matches[i - 1].split()[-1]:
            #8 = amount of characters taken up by [' during ']
            length = len(words[-1]) + 8
            possible_matches[i - 1] = possible_matches[i - 1][:length * -1]
return possible_matches
def find_matches(course_data, possible_matches, item_in, meal_name, requisites):
"""Appends matches of specified food item in data of an individual course to
list of possible matches.
:param course_data: Chosen course subsection of MDining API HTTP response data
:type course_data: dict
:param possible_matches: List of food items in data that matched user input
:type possible_matches: list
:param item_in: User input food item
:type item_in: string
:param meal_name: Name of meal
:type meal_name: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
traits = requisites['trait']
allergens = requisites['allergens']
item_data = []
datatype = type(course_data)
if datatype is list:
item_data += course_data
else:
item_data.append(course_data)
for item in item_data:
        if not check_item_specifications(item, traits, allergens):
continue
if item_in.upper() in item['name'].upper():
if item['name'][-1] == ' ':
item['name'] = item['name'][:-1]
possible_matches.append(item['name'] + ' during ' + meal_name)
return possible_matches
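#Illustrative example (hypothetical course data):
#   find_matches({'name': 'Chicken Noodle Soup'}, [], 'chicken', 'Lunch',
#                {'trait': [], 'allergens': []})
#   -> ['Chicken Noodle Soup during Lunch']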
#########################################################################
###Primary Handler Functions
def request_location_and_meal(date_in, loc_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and meal entities from ``findLocationAndMeal`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param meal_in: Input meal
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
#preset vars
url = 'http://api.studentlife.umich.edu/menu/xml2print.php?controller=&view=json'
location = '&location='
date = '&date='
meal = '&meal='
#API url concatenation
location += loc_in
meal += meal_in
date += str(date_in)
url = url + location + date + meal
url = remove_spaces(url)
#fetching json
data = requests.get(url).json()
#checking if specified meal available
if check_meal_available(data, meal_in):
returnstring = (get_items(data, requisites, False)).rstrip(', ')
return format_plural(returnstring)
else:
return "No meal is available"
#Handle meal item data request
def request_item(date_in, loc_in, item_in, meal_in, requisites):
"""Handles searching for appropriate data response for valid specified
location and food item entities (and meal entity if included) from ``findItem`` intent.
:param date_in: Input date
:type date_in: string
:param loc_in: Input location
:type loc_in: string
:param item_in: Input food item
:type item_in: string
:param meal_in: Input meal, can be empty string if not specified
:type meal_in: string
:param requisites: Contains information food item must comply with (traits, allergens, etc)
:type requisites: dict
"""
secrets = get_secrets()
url = secrets.get('m_dining_api_main')
location = '&location='
date = '&date='
meal = '&meal='
#API url concatenation
location += loc_in
date += str(date_in)
url = url + location + date + meal
url = remove_spaces(url)
if meal_in == '':
meal_entered = False
else:
meal_entered = True
#fetching json
data = requests.get(url).json()
possible_matches = []
#Loop through meals
for i in data['menu']['meal']:
#If meal specified, only check specified meal
if meal_entered and i['name'].upper() != meal_in.upper():
continue
#Skip meal if no food items available
if 'course' not in i:
continue
#Loop through food items in course
for j in i['course']:
for key, value in j.items():
if key == 'name':
course_data = j['menuitem']
meal_name = i['name']
#Append matches to specified item to possible_matches list
possible_matches = find_matches(course_data, possible_matches,
item_in, meal_name, requisites)
#Specified item found
if possible_matches:
possible_matches = find_item_formatting(possible_matches)
text = 'Yes, there is '
for i in range(len(possible_matches)):
if len(possible_matches) > 1 and (i == len(possible_matches) - 1):
text += ' and'
text += ' ' + possible_matches[i]
if i != len(possible_matches) - 1:
text += ','
#Specified item not found
else:
text = 'Sorry, that is not available'
return {'fulfillmentText': text}
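#Illustrative call (hypothetical arguments):
#   request_item('2020-01-15', 'Bursley', 'pizza', '', {'trait': [], 'allergens': []})
#   -> a dict such as {'fulfillmentText': 'Yes, there is Cheese Pizza during Lunch, and Pepperoni Pizza during Dinner'}
#      or {'fulfillmentText': 'Sorry, that is not available'} when nothing matches.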
"step-5": "import requests\nfrom google.cloud import datastore\nimport google.cloud.logging\n\n###Helper functions\n\ndef report_error(error_text):\n \"\"\"Logs error to Stackdriver.\n :param error_text: The text to log to Stackdriver\n :type error_text: string\n \"\"\"\n client = google.cloud.logging.Client()\n logger = client.logger(\"automated_error_catch\")\n logger.log_text(error_text)\n\ndef get_secrets():\n \"\"\"Fetches secrets from Datastore and returns them as a list.\n \"\"\"\n client = datastore.Client()\n query = client.query(kind='env_vars')\n entity = query.fetch()\n secrets = list(entity)[0]\n return secrets\n\ndef format_requisites(text, requisites):\n \"\"\"If any item requisites specified, adds them to response text data for more holistic response.\n\n :param text: The response text data to be formatted\n :type text: string\n :param requisites: Contains information food item must comply with (traits, allergens, etc)\n :type requisites: dict\n \"\"\"\n traits_text = ''\n allergens_text = ''\n\n req_map = {'trait': {'mhealthy': 'healthy'},\n 'allergens': {'sesame-seed': 'sesame seeds',\n 'tree-nuts': 'tree nuts',\n 'wheat_barley_rye': 'wheat or barley or rye'}}\n\n #If traits specified, extract into a string\n for i, trait in enumerate(requisites['trait']):\n if traits_text:\n traits_text += ', '\n traits_text += req_map['trait'].get(trait, trait)\n traits_text = format_plural(traits_text.rstrip(', '))\n\n #If allergens specified, extract into a string\n for i, allergen in enumerate(requisites['allergens']):\n if allergens_text:\n allergens_text += ', '\n allergens_text += req_map['allergens'].get(allergen, allergen)\n allergens_text = format_plural(allergens_text.rstrip(', '))\n allergens_text = allergens_text.replace('and', 'or')\n\n #Requisite-specific language\n if allergens_text:\n allergens_text = ' without ' + allergens_text\n if traits_text:\n traits_text = ' that is ' + traits_text\n\n #Return combined string\n if (allergens_text or traits_text) and 'Sorry, that is not available' in text:\n traits_text = traits_text.replace(' that is ', '')\n text = text.replace('Sorry, ', 'Sorry, ' + traits_text + ' ')\n text = text.replace('that is not available', '[meal]')\n return text + allergens_text + ' is not available'\n else:\n return text + traits_text + allergens_text\n\ndef format_plural(text):\n \"\"\"Adds 'and' before last item in list of items.\n\n :param text: The string to be manipulated\n :type text: string\n \"\"\"\n if ',' in text:\n index = text.rfind(',') + 2\n text = text[:index] + 'and ' + text[index:]\n return text\n\ndef remove_spaces(url_block):\n \"\"\"Removes spaces in url string to create valid url string.\n\n :param url_block: The url string to be manipulated\n :type search: string\n \"\"\"\n temp = \"\"\n for i in range(len(url_block)):\n if url_block[i] == ' ':\n temp += '+'\n else:\n temp += url_block[i]\n return temp\n\ndef check_meal_available(data, meal):\n \"\"\"Searches response data to check if meal is available at specified location/date.\n\n :param data: MDining API HTTP response data\n :type data: dict\n :param meal: Name of meal\n :type meal: string\n \"\"\"\n for key in data['menu']['meal']:\n if data['menu']['meal']['name'].upper() == meal.upper():\n if 'course' in data['menu']['meal']:\n return True\n return False\n return False\n\ndef check_course_available(data, course):\n \"\"\"Searches response data to check if course is available in specified meal.\n\n :param data: MDining API HTTP response data\n :type data: dict\n :param 
course: Name of course\n :type course: string\n \"\"\"\n for i in range(len(data['menu']['meal']['course'])):\n for key, value in data['menu']['meal']['course'][i].items():\n if key == 'name':\n if value.upper() == course.upper():\n return True\n return False\n\n\n\ndef check_item_specifications(item, traits, allergens):\n \"\"\"Returns true if food item is satisfactory with specified traits and allergens.\n\n :param item: Data of specific food item\n :type item: dict\n :param traits: List of specified traits item must have, can be empty\n :type traits: list\n :param allergens: List of allergens item cannot have, can be empty\n :type allergens: list\n \"\"\"\n #Return false if allergens list isn't empty and any allergens found\n if allergens and 'allergens' in item:\n for allergen in allergens:\n if allergen in item['allergens']:\n return False\n\n #Return true if traits list empty\n if not traits:\n return True\n\n #Return false if traits list isn't empty and any traits are missing\n if 'trait' in item:\n for trait in traits:\n if trait not in item['trait']:\n return False\n\n #All traits found, return true\n return True\n else:\n return False\n\ndef get_items(data, requisites, formatted):\n \"\"\"Returns string of food items of each course in response data for\n fulfillmentText in response to Dialogflow.\n\n :param data: MDining API HTTP response data\n :type data: dict\n :param requisites: Contains information food item must comply with (traits, allergens, etc)\n :type requisites: dict\n :param formatted: True/False - formats response string if true\n :type formatted: boolean\n \"\"\"\n returndata = \"\"\n traits = requisites['trait']\n allergens = requisites['allergens']\n\n if formatted:\n prefix = '\\t'\n suffix = '\\n'\n else:\n prefix = ''\n suffix = ', '\n\n for course in data['menu']['meal']['course']:\n item_data = []\n datatype = type(course['menuitem'])\n\n if datatype is list:\n item_data += course['menuitem']\n else:\n item_data.append(course['menuitem'])\n\n for item in item_data:\n if check_item_specifications(item, traits, allergens) and 'No Service at this Time' not in item['name']:\n returndata += (prefix + (item['name']).rstrip(', ') + suffix)\n\n return returndata\n\ndef find_item_formatting(possible_matches):\n \"\"\"Formatting list of possible matches into more natural sentence structure\n by removing redundancy:\n [Chicken during lunch, chicken wings during lunch, and chicken patty during dinner] ->\n [Chicken, chicken wings during lunch, and chicken patty during dinner]\n\n :param possible_matches: List of food items in data that matched user input\n :type possible_matches: list\n \"\"\"\n for i in range(len(possible_matches)):\n if i == 0:\n continue\n words = possible_matches[i].split()\n\n #If previous term has same ending (\"Dinner\") as current term, remove it\n if possible_matches[i].split()[-1] == possible_matches[i - 1].split()[-1]:\n #8 = amount of characters taken up by [' during ']\n length = len(possible_matches[i].split()[-1]) + 8\n possible_matches[i - 1] = possible_matches[i - 1][:length*-1]\n\n return possible_matches\n\n\ndef find_matches(course_data, possible_matches, item_in, meal_name, requisites):\n \"\"\"Appends matches of specified food item in data of an individual course to\n list of possible matches.\n\n :param course_data: Chosen course subsection of MDining API HTTP response data\n :type course_data: dict\n :param possible_matches: List of food items in data that matched user input\n :type possible_matches: list\n :param item_in: User 
input food item\n :type item_in: string\n :param meal_name: Name of meal\n :type meal_name: string\n :param requisites: Contains information food item must comply with (traits, allergens, etc)\n :type requisites: dict\n \"\"\"\n\n traits = requisites['trait']\n allergens = requisites['allergens']\n\n item_data = []\n datatype = type(course_data)\n\n if datatype is list:\n item_data += course_data\n else:\n item_data.append(course_data)\n\n for item in item_data:\n if check_item_specifications(item, traits, allergens) == False:\n continue\n if item_in.upper() in item['name'].upper():\n if item['name'][-1] == ' ':\n item['name'] = item['name'][:-1]\n\n possible_matches.append(item['name'] + ' during ' + meal_name)\n\n return possible_matches\n\n\n\n#########################################################################\n###Primary Handler Functions\n\n\ndef request_location_and_meal(date_in, loc_in, meal_in, requisites):\n \"\"\"Handles searching for appropriate data response for valid specified\n location and meal entities from ``findLocationAndMeal`` intent.\n\n :param date_in: Input date\n :type date_in: string\n :param loc_in: Input location\n :type loc_in: string\n :param meal_in: Input meal\n :type meal_in: string\n :param requisites: Contains information food item must comply with (traits, allergens, etc)\n :type requisites: dict\n \"\"\"\n\n #preset vars\n url = 'http://api.studentlife.umich.edu/menu/xml2print.php?controller=&view=json'\n location = '&location='\n date = '&date='\n meal = '&meal='\n\n #API url concatenation\n location += loc_in\n meal += meal_in\n date += str(date_in)\n url = url + location + date + meal\n url = remove_spaces(url)\n\n #fetching json\n data = requests.get(url).json()\n\n #checking if specified meal available\n if check_meal_available(data, meal_in):\n returnstring = (get_items(data, requisites, False)).rstrip(', ')\n return format_plural(returnstring)\n else:\n return \"No meal is available\"\n\n#Handle meal item data request\ndef request_item(date_in, loc_in, item_in, meal_in, requisites):\n \"\"\"Handles searching for appropriate data response for valid specified\n location and food item entities (and meal entity if included) from ``findItem`` intent.\n\n :param date_in: Input date\n :type date_in: string\n :param loc_in: Input location\n :type loc_in: string\n :param item_in: Input food item\n :type item_in: string\n :param meal_in: Input meal, can be empty string if not specified\n :type meal_in: string\n :param requisites: Contains information food item must comply with (traits, allergens, etc)\n :type requisites: dict\n \"\"\"\n secrets = get_secrets()\n url = secrets.get('m_dining_api_main')\n location = '&location='\n date = '&date='\n meal = '&meal='\n\n #API url concatenation\n location += loc_in\n date += str(date_in)\n url = url + location + date + meal\n url = remove_spaces(url)\n\n if meal_in == '':\n meal_entered = False\n else:\n meal_entered = True\n\n #fetching json\n data = requests.get(url).json()\n\n possible_matches = []\n\n #Loop through meals\n for i in data['menu']['meal']:\n\n #If meal specified, only check specified meal\n if meal_entered and i['name'].upper() != meal_in.upper():\n continue\n #Skip meal if no food items available\n if 'course' not in i:\n continue\n\n #Loop through food items in course\n for j in i['course']:\n for key, value in j.items():\n if key == 'name':\n course_data = j['menuitem']\n meal_name = i['name']\n #Append matches to specified item to possible_matches list\n possible_matches = 
find_matches(course_data, possible_matches,\n item_in, meal_name, requisites)\n \n #Specified item found\n if possible_matches:\n possible_matches = find_item_formatting(possible_matches)\n text = 'Yes, there is '\n for i in range(len(possible_matches)):\n if len(possible_matches) > 1 and (i == len(possible_matches) - 1):\n text += ' and'\n text += ' ' + possible_matches[i]\n if i != len(possible_matches) - 1:\n text += ','\n\n #Specified item not found\n else:\n text = 'Sorry, that is not available'\n\n\n return {'fulfillmentText': text}\n",
"step-ids": [
10,
12,
13,
14,
15
]
}
|
[
10,
12,
13,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def first_repeat(chars):
for x in chars:
if chars.count(x) > 1:
return x
return '-1'
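# Illustrative checks of the function above (example inputs, not part of the original snippet):
assert first_repeat('abcdeb') == 'b'   # 'b' is the first character with count > 1
assert first_repeat('xyz') == '-1'     # no repeated character -> '-1'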
|
flexible
|
{
"blob_id": "bf683f8e7fb5ad5f7cd915a8a01d9adf7d13e739",
"index": 3375,
"step-1": "<mask token>\n",
"step-2": "def first_repeat(chars):\n for x in chars:\n if chars.count(x) > 1:\n return x\n return '-1'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(sourceFile, 'r') as paragraph:
paragraph = paragraph.read().split('\n\n')
for sentence in paragraph:
sentWithPunctuation = sentence
sentNoPunctuation = re.sub('[^\\w\\s]', '', sentence)
words = sentNoPunctuation.split(' ')
for word in words:
wordLen = wordLen + len(word)
totWords = totWords + len(words)
avgSentLen_Words = round(totWords / len(paragraph), 2)
avgLetterCount = round(wordLen / totWords, 2)
totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)
print(f"""
Paragraph Analysis of '{sourceFile}' file""")
print(f'---------------------------------------------------------')
print(f' Approximate Word Count: {totWords} ')
print(f' Approximate Sentence Count: {len(paragraph)} ')
print(f' Average Letter Count: {avgLetterCount} ')
print(f' Average Sentence Length (words): {avgSentLen_Words} ')
print(f' Average Sentence Length (chars): {avgSentLen_chars} ')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
totWords = 0
wordLen = 0
totSentWithPunctuation = 0
sourceFile = os.path.join('Resources', 'paragraph_2.txt')
with open(sourceFile, 'r') as paragraph:
paragraph = paragraph.read().split('\n\n')
for sentence in paragraph:
sentWithPunctuation = sentence
sentNoPunctuation = re.sub('[^\\w\\s]', '', sentence)
words = sentNoPunctuation.split(' ')
for word in words:
wordLen = wordLen + len(word)
totWords = totWords + len(words)
avgSentLen_Words = round(totWords / len(paragraph), 2)
avgLetterCount = round(wordLen / totWords, 2)
totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)
print(f"""
Paragraph Analysis of '{sourceFile}' file""")
print(f'---------------------------------------------------------')
print(f' Approximate Word Count: {totWords} ')
print(f' Approximate Sentence Count: {len(paragraph)} ')
print(f' Average Letter Count: {avgLetterCount} ')
print(f' Average Sentence Length (words): {avgSentLen_Words} ')
print(f' Average Sentence Length (chars): {avgSentLen_chars} ')
<|reserved_special_token_1|>
import os
import csv
import re
totWords = 0
wordLen = 0
totSentWithPunctuation = 0
sourceFile = os.path.join('Resources', 'paragraph_2.txt')
with open(sourceFile, 'r') as paragraph:
paragraph = paragraph.read().split('\n\n')
for sentence in paragraph:
sentWithPunctuation = sentence
sentNoPunctuation = re.sub('[^\\w\\s]', '', sentence)
words = sentNoPunctuation.split(' ')
for word in words:
wordLen = wordLen + len(word)
totWords = totWords + len(words)
avgSentLen_Words = round(totWords / len(paragraph), 2)
avgLetterCount = round(wordLen / totWords, 2)
totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)
print(f"""
Paragraph Analysis of '{sourceFile}' file""")
print(f'---------------------------------------------------------')
print(f' Approximate Word Count: {totWords} ')
print(f' Approximate Sentence Count: {len(paragraph)} ')
print(f' Average Letter Count: {avgLetterCount} ')
print(f' Average Sentence Length (words): {avgSentLen_Words} ')
print(f' Average Sentence Length (chars): {avgSentLen_chars} ')
<|reserved_special_token_1|>
import os
import csv
import re
totWords = 0
wordLen = 0
totSentWithPunctuation = 0
sourceFile = os.path.join('Resources', 'paragraph_2.txt')
with open(sourceFile, 'r') as paragraph:
paragraph = paragraph.read().split("\n\n")
for sentence in paragraph:
# Remove punctuation from sentences
sentWithPunctuation = sentence
sentNoPunctuation = re.sub(r'[^\w\s]','',sentence)
#Split sentence with no punctuation by words using spaces
words = sentNoPunctuation.split(" ")
for word in words:
wordLen = wordLen + len(word)
# Compute totals for output message
totWords = totWords + len(words) # Total words for all sentences
avgSentLen_Words = round(totWords / len(paragraph),2) # Average words for all sentences
avgLetterCount = round(wordLen/totWords,2) # Average letter by word for all sentences
totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)
avgSentLen_chars = round(totSentWithPunctuation / len(paragraph),2)
#Validate output by printing a test line
# print(f"words: {len(words)} S w Punct. len: {len(sentWithPunctuation)} Sentence: {sentWithPunctuation}")
print(f"\n\nParagraph Analysis of '{sourceFile}' file")
print(f"---------------------------------------------------------")
print(f" Approximate Word Count: {totWords} ")
print(f" Approximate Sentence Count: {len(paragraph)} ")
print(f" Average Letter Count: {avgLetterCount} ")
print(f" Average Sentence Length (words): {avgSentLen_Words} ")
print(f" Average Sentence Length (chars): {avgSentLen_chars} ")
|
flexible
|
{
"blob_id": "3cd7abf9659fe1db0ef3aa58df8dd7fd959e10a6",
"index": 386,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n",
"step-3": "<mask token>\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n",
"step-4": "import os\nimport csv\nimport re\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split('\\n\\n')\nfor sentence in paragraph:\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub('[^\\\\w\\\\s]', '', sentence)\n words = sentNoPunctuation.split(' ')\n for word in words:\n wordLen = wordLen + len(word)\n totWords = totWords + len(words)\n avgSentLen_Words = round(totWords / len(paragraph), 2)\n avgLetterCount = round(wordLen / totWords, 2)\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph), 2)\nprint(f\"\"\"\n\nParagraph Analysis of '{sourceFile}' file\"\"\")\nprint(f'---------------------------------------------------------')\nprint(f' Approximate Word Count: {totWords} ')\nprint(f' Approximate Sentence Count: {len(paragraph)} ')\nprint(f' Average Letter Count: {avgLetterCount} ')\nprint(f' Average Sentence Length (words): {avgSentLen_Words} ')\nprint(f' Average Sentence Length (chars): {avgSentLen_chars} ')\n",
"step-5": "import os\nimport csv\nimport re\n\ntotWords = 0\nwordLen = 0\ntotSentWithPunctuation = 0\n\nsourceFile = os.path.join('Resources', 'paragraph_2.txt')\n\nwith open(sourceFile, 'r') as paragraph:\n paragraph = paragraph.read().split(\"\\n\\n\")\n\n\nfor sentence in paragraph:\n # Remove punctuation from sentences\n sentWithPunctuation = sentence\n sentNoPunctuation = re.sub(r'[^\\w\\s]','',sentence)\n\n #Split sentence with no punctuation by words using spaces\n words = sentNoPunctuation.split(\" \")\n for word in words:\n wordLen = wordLen + len(word)\n\n # Compute totals for output message \n totWords = totWords + len(words) # Total words for all sentences\n avgSentLen_Words = round(totWords / len(paragraph),2) # Average words for all sentences\n avgLetterCount = round(wordLen/totWords,2) # Average letter by word for all sentences\n totSentWithPunctuation = totSentWithPunctuation + len(sentWithPunctuation)\n avgSentLen_chars = round(totSentWithPunctuation / len(paragraph),2)\n\n #Validate output by printing a test line\n # print(f\"words: {len(words)} S w Punct. len: {len(sentWithPunctuation)} Sentence: {sentWithPunctuation}\")\n\nprint(f\"\\n\\nParagraph Analysis of '{sourceFile}' file\")\nprint(f\"---------------------------------------------------------\")\nprint(f\" Approximate Word Count: {totWords} \")\nprint(f\" Approximate Sentence Count: {len(paragraph)} \")\nprint(f\" Average Letter Count: {avgLetterCount} \")\nprint(f\" Average Sentence Length (words): {avgSentLen_Words} \")\nprint(f\" Average Sentence Length (chars): {avgSentLen_chars} \")\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def itertools_groupby_example(list_of_nodes):
graph = defaultdict(list)
for key, group in groupby(l, lambda x: x[0]):
graph[key].append(list(group))
print(dict(graph))
def itertools_false_filter_example(iterator):
l = []
for item in filterfalse(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_dropwhile_example(iterator):
l = []
for item in dropwhile(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_takewhile_example(iterator):
l = []
print(iterator)
for item in takewhile(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_cycle_example(iterator):
for item in cycle(iterator):
print(item)
<|reserved_special_token_0|>
def itertools_chain_from_iterable_examaple():
l = []
for item in chain.from_iterable([[2, 3, 4], [2, 5, 6]]):
l.append(item)
print(l)
def itertools_zip_longest():
l1 = ['red', 'orange', 'yellow', 'green', 'blue']
l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
l3 = ['a', 'b', 'c']
for item in zip_longest(l1, l2, l3, fillvalue=None):
print(item)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def itertools_groupby_example(list_of_nodes):
graph = defaultdict(list)
for key, group in groupby(l, lambda x: x[0]):
graph[key].append(list(group))
print(dict(graph))
def itertools_false_filter_example(iterator):
l = []
for item in filterfalse(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_dropwhile_example(iterator):
l = []
for item in dropwhile(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_takewhile_example(iterator):
l = []
print(iterator)
for item in takewhile(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_cycle_example(iterator):
for item in cycle(iterator):
print(item)
def itertools_count_example():
for item in count(start=1, step=1):
print(item)
<|reserved_special_token_0|>
def itertools_chain_example(iterator1, iterator2):
l = []
for item in chain(iterator1, iterator2):
l.append(item)
print(l)
def itertools_islice_example(iterator):
l = []
for item in islice(iterator, 0, 10, 2):
l.append(item)
print(l)
def itertools_chain_from_iterable_examaple():
l = []
for item in chain.from_iterable([[2, 3, 4], [2, 5, 6]]):
l.append(item)
print(l)
def itertools_zip_longest():
l1 = ['red', 'orange', 'yellow', 'green', 'blue']
l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
l3 = ['a', 'b', 'c']
for item in zip_longest(l1, l2, l3, fillvalue=None):
print(item)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def itertools_groupby_example(list_of_nodes):
graph = defaultdict(list)
for key, group in groupby(l, lambda x: x[0]):
graph[key].append(list(group))
print(dict(graph))
def itertools_false_filter_example(iterator):
l = []
for item in filterfalse(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_dropwhile_example(iterator):
l = []
for item in dropwhile(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_takewhile_example(iterator):
l = []
print(iterator)
for item in takewhile(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_cycle_example(iterator):
for item in cycle(iterator):
print(item)
def itertools_count_example():
for item in count(start=1, step=1):
print(item)
def itertools_repeat_example():
for item in repeat(10, 5):
print(3)
def itertools_chain_example(iterator1, iterator2):
l = []
for item in chain(iterator1, iterator2):
l.append(item)
print(l)
def itertools_islice_example(iterator):
l = []
for item in islice(iterator, 0, 10, 2):
l.append(item)
print(l)
def itertools_chain_from_iterable_examaple():
l = []
for item in chain.from_iterable([[2, 3, 4], [2, 5, 6]]):
l.append(item)
print(l)
def itertools_zip_longest():
l1 = ['red', 'orange', 'yellow', 'green', 'blue']
l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
l3 = ['a', 'b', 'c']
for item in zip_longest(l1, l2, l3, fillvalue=None):
print(item)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
l = [(1, 2, 9), (1, 3, 12), (2, 3, 8), (2, 4, 4), (2, 5, 7), (3, 5, 5), (3,
6, 2), (4, 5, 2), (4, 7, 10), (5, 6, 11), (5, 7, 2), (6, 8, 4), (7, 8,
4), (7, 9, 3), (8, 9, 13)]
b = ['America', 'Sudan', 'Srilanka', 'Pakistan', 'Nepal', 'India', 'France']
<|reserved_special_token_0|>
def itertools_groupby_example(list_of_nodes):
graph = defaultdict(list)
for key, group in groupby(l, lambda x: x[0]):
graph[key].append(list(group))
print(dict(graph))
def itertools_false_filter_example(iterator):
l = []
for item in filterfalse(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_dropwhile_example(iterator):
l = []
for item in dropwhile(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_takewhile_example(iterator):
l = []
print(iterator)
for item in takewhile(lambda x: x > 10, iterator):
l.append(item)
print(l)
def itertools_cycle_example(iterator):
for item in cycle(iterator):
print(item)
def itertools_count_example():
for item in count(start=1, step=1):
print(item)
def itertools_repeat_example():
for item in repeat(10, 5):
print(3)
def itertools_chain_example(iterator1, iterator2):
l = []
for item in chain(iterator1, iterator2):
l.append(item)
print(l)
def itertools_islice_example(iterator):
l = []
for item in islice(iterator, 0, 10, 2):
l.append(item)
print(l)
def itertools_chain_from_iterable_examaple():
l = []
for item in chain.from_iterable([[2, 3, 4], [2, 5, 6]]):
l.append(item)
print(l)
def itertools_zip_longest():
l1 = ['red', 'orange', 'yellow', 'green', 'blue']
l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
l3 = ['a', 'b', 'c']
for item in zip_longest(l1, l2, l3, fillvalue=None):
print(item)
iterator = [11, 15, 2, 5, 8, 10, 50, 8, 2, 3, 90, 80, 100]
iterator1 = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 5]
iterator2 = ['a', 'b', 'c']
<|reserved_special_token_1|>
#https://docs.python.org/3.4/library/itertools.html#module-itertools
l = [(1, 2, 9), (1, 3, 12), (2, 3, 8), (2, 4, 4), (2, 5, 7), (3, 5, 5), (3, 6, 2), (4, 5, 2), (4, 7, 10),
(5, 6, 11), (5, 7, 2), (6, 8, 4), (7, 8, 4), (7, 9, 3), (8, 9, 13)]
b = ['America', 'Sudan', 'Srilanka', 'Pakistan', 'Nepal', 'India', 'France']
from itertools import groupby, filterfalse, dropwhile, cycle, count, repeat, chain, takewhile, islice, zip_longest
from collections import defaultdict
#NOTE- always use itertools with a sorted list when the index of the element is not an issue for your solution
def itertools_groupby_example(list_of_nodes):
graph = defaultdict(list)
for key, group in groupby(l, lambda x: x[0]):
graph[key].append(list(group))
print(dict(graph))
def itertools_false_filter_example(iterator):
l = []
for item in filterfalse(lambda x :x>10, iterator):
l.append(item)
print(l)
def itertools_dropwhile_example(iterator):
l = []
for item in dropwhile(lambda x: x>10, iterator):
l.append(item)
print(l)
def itertools_takewhile_example(iterator):
l = []
print(iterator)
for item in takewhile(lambda x: x>10, iterator):
l.append(item)
print(l)
def itertools_cycle_example(iterator):
for item in cycle(iterator):
print(item)
def itertools_count_example():
for item in count(start=1, step=1):
print(item)
def itertools_repeat_example():
for item in repeat(10, 5):
print(3)
def itertools_chain_example(iterator1, iterator2):
l = []
for item in chain(iterator1, iterator2):
l.append(item)
print(l)
def itertools_islice_example(iterator):
l = []
for item in islice(iterator, 0, 10, 2):
l.append(item)
print(l)
def itertools_chain_from_iterable_examaple():
l = []
for item in chain.from_iterable([[2,3,4],[2,5,6]]):
l.append(item)
print(l)
def itertools_zip_longest():
l1 = ['red', 'orange', 'yellow', 'green', 'blue']
l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,]
l3 = ['a','b','c']
for item in zip_longest(l1, l2, l3, fillvalue=None):
print(item)
iterator = [11,15,2,5,8,10,50,8,2,3,90,80,100]
iterator1 = [0,10,20,30,40,50,60,70,80,90,100,5]
iterator2 = ['a','b','c']
#itertools_false_filter_example(iterator1)
#itertools_dropwhile_example(iterator1)
#itertools_cycle_example(iterator1)
#itertools_count_example()
#itertools_repeat_example()
#itertools_chain_example(iterator1, iterator2)
#itertools_takewhile_example(iterator)
#itertools_islice_example(iterator)
#itertools_chain_from_iterable_examaple()
#itertools_zip_longest()
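# Quick sanity checks for two of the helpers, using the lists defined above
# (expected values worked out by hand; uncomment the calls above to see the printed form):
assert list(filterfalse(lambda x: x > 10, iterator1)) == [0, 10, 5]   # keeps items where x > 10 is False
assert list(islice(iterator, 0, 10, 2)) == [11, 2, 8, 50, 2]          # every 2nd item among the first 10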
|
flexible
|
{
"blob_id": "629353392e3a4f346f734543ae3f2b8dc616a6c3",
"index": 5816,
"step-1": "<mask token>\n\n\ndef itertools_groupby_example(list_of_nodes):\n graph = defaultdict(list)\n for key, group in groupby(l, lambda x: x[0]):\n graph[key].append(list(group))\n print(dict(graph))\n\n\ndef itertools_false_filter_example(iterator):\n l = []\n for item in filterfalse(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_dropwhile_example(iterator):\n l = []\n for item in dropwhile(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_takewhile_example(iterator):\n l = []\n print(iterator)\n for item in takewhile(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_cycle_example(iterator):\n for item in cycle(iterator):\n print(item)\n\n\n<mask token>\n\n\ndef itertools_chain_from_iterable_examaple():\n l = []\n for item in chain.from_iterable([[2, 3, 4], [2, 5, 6]]):\n l.append(item)\n print(l)\n\n\ndef itertools_zip_longest():\n l1 = ['red', 'orange', 'yellow', 'green', 'blue']\n l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n l3 = ['a', 'b', 'c']\n for item in zip_longest(l1, l2, l3, fillvalue=None):\n print(item)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef itertools_groupby_example(list_of_nodes):\n graph = defaultdict(list)\n for key, group in groupby(l, lambda x: x[0]):\n graph[key].append(list(group))\n print(dict(graph))\n\n\ndef itertools_false_filter_example(iterator):\n l = []\n for item in filterfalse(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_dropwhile_example(iterator):\n l = []\n for item in dropwhile(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_takewhile_example(iterator):\n l = []\n print(iterator)\n for item in takewhile(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_cycle_example(iterator):\n for item in cycle(iterator):\n print(item)\n\n\ndef itertools_count_example():\n for item in count(start=1, step=1):\n print(item)\n\n\n<mask token>\n\n\ndef itertools_chain_example(iterator1, iterator2):\n l = []\n for item in chain(iterator1, iterator2):\n l.append(item)\n print(l)\n\n\ndef itertools_islice_example(iterator):\n l = []\n for item in islice(iterator, 0, 10, 2):\n l.append(item)\n print(l)\n\n\ndef itertools_chain_from_iterable_examaple():\n l = []\n for item in chain.from_iterable([[2, 3, 4], [2, 5, 6]]):\n l.append(item)\n print(l)\n\n\ndef itertools_zip_longest():\n l1 = ['red', 'orange', 'yellow', 'green', 'blue']\n l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n l3 = ['a', 'b', 'c']\n for item in zip_longest(l1, l2, l3, fillvalue=None):\n print(item)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef itertools_groupby_example(list_of_nodes):\n graph = defaultdict(list)\n for key, group in groupby(l, lambda x: x[0]):\n graph[key].append(list(group))\n print(dict(graph))\n\n\ndef itertools_false_filter_example(iterator):\n l = []\n for item in filterfalse(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_dropwhile_example(iterator):\n l = []\n for item in dropwhile(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_takewhile_example(iterator):\n l = []\n print(iterator)\n for item in takewhile(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_cycle_example(iterator):\n for item in cycle(iterator):\n print(item)\n\n\ndef itertools_count_example():\n for item in count(start=1, step=1):\n print(item)\n\n\ndef itertools_repeat_example():\n for item in repeat(10, 5):\n print(3)\n\n\ndef itertools_chain_example(iterator1, iterator2):\n l = []\n for item in chain(iterator1, iterator2):\n l.append(item)\n print(l)\n\n\ndef itertools_islice_example(iterator):\n l = []\n for item in islice(iterator, 0, 10, 2):\n l.append(item)\n print(l)\n\n\ndef itertools_chain_from_iterable_examaple():\n l = []\n for item in chain.from_iterable([[2, 3, 4], [2, 5, 6]]):\n l.append(item)\n print(l)\n\n\ndef itertools_zip_longest():\n l1 = ['red', 'orange', 'yellow', 'green', 'blue']\n l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n l3 = ['a', 'b', 'c']\n for item in zip_longest(l1, l2, l3, fillvalue=None):\n print(item)\n\n\n<mask token>\n",
"step-4": "l = [(1, 2, 9), (1, 3, 12), (2, 3, 8), (2, 4, 4), (2, 5, 7), (3, 5, 5), (3,\n 6, 2), (4, 5, 2), (4, 7, 10), (5, 6, 11), (5, 7, 2), (6, 8, 4), (7, 8, \n 4), (7, 9, 3), (8, 9, 13)]\nb = ['America', 'Sudan', 'Srilanka', 'Pakistan', 'Nepal', 'India', 'France']\n<mask token>\n\n\ndef itertools_groupby_example(list_of_nodes):\n graph = defaultdict(list)\n for key, group in groupby(l, lambda x: x[0]):\n graph[key].append(list(group))\n print(dict(graph))\n\n\ndef itertools_false_filter_example(iterator):\n l = []\n for item in filterfalse(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_dropwhile_example(iterator):\n l = []\n for item in dropwhile(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_takewhile_example(iterator):\n l = []\n print(iterator)\n for item in takewhile(lambda x: x > 10, iterator):\n l.append(item)\n print(l)\n\n\ndef itertools_cycle_example(iterator):\n for item in cycle(iterator):\n print(item)\n\n\ndef itertools_count_example():\n for item in count(start=1, step=1):\n print(item)\n\n\ndef itertools_repeat_example():\n for item in repeat(10, 5):\n print(3)\n\n\ndef itertools_chain_example(iterator1, iterator2):\n l = []\n for item in chain(iterator1, iterator2):\n l.append(item)\n print(l)\n\n\ndef itertools_islice_example(iterator):\n l = []\n for item in islice(iterator, 0, 10, 2):\n l.append(item)\n print(l)\n\n\ndef itertools_chain_from_iterable_examaple():\n l = []\n for item in chain.from_iterable([[2, 3, 4], [2, 5, 6]]):\n l.append(item)\n print(l)\n\n\ndef itertools_zip_longest():\n l1 = ['red', 'orange', 'yellow', 'green', 'blue']\n l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n l3 = ['a', 'b', 'c']\n for item in zip_longest(l1, l2, l3, fillvalue=None):\n print(item)\n\n\niterator = [11, 15, 2, 5, 8, 10, 50, 8, 2, 3, 90, 80, 100]\niterator1 = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 5]\niterator2 = ['a', 'b', 'c']\n",
"step-5": "#https://docs.python.org/3.4/library/itertools.html#module-itertools\n\n\nl = [(1, 2, 9), (1, 3, 12), (2, 3, 8), (2, 4, 4), (2, 5, 7), (3, 5, 5), (3, 6, 2), (4, 5, 2), (4, 7, 10),\n (5, 6, 11), (5, 7, 2), (6, 8, 4), (7, 8, 4), (7, 9, 3), (8, 9, 13)]\n\nb = ['America', 'Sudan', 'Srilanka', 'Pakistan', 'Nepal', 'India', 'France']\n\nfrom itertools import groupby, filterfalse, dropwhile, cycle, count, repeat, chain, takewhile, islice, zip_longest\nfrom collections import defaultdict\n#NOTE- always use itertools with sorted list if index of element is not issue to your solution\n\ndef itertools_groupby_example(list_of_nodes):\n\tgraph = defaultdict(list)\n\tfor key, group in groupby(l, lambda x: x[0]):\n\t\t\tgraph[key].append(list(group))\n\tprint(dict(graph))\n\ndef itertools_false_filter_example(iterator):\n\tl = []\n\tfor item in filterfalse(lambda x :x>10, iterator):\n\t\tl.append(item)\n\tprint(l)\n\ndef itertools_dropwhile_example(iterator):\n\tl = []\n\tfor item in dropwhile(lambda x: x>10, iterator):\n\t\tl.append(item)\n\tprint(l)\n\ndef itertools_takewhile_example(iterator):\n\tl = []\n\tprint(iterator)\n\tfor item in takewhile(lambda x: x>10, iterator):\n\t\tl.append(item)\n\tprint(l)\n\ndef itertools_cycle_example(iterator):\n\tfor item in cycle(iterator):\n\t\tprint(item)\n\ndef itertools_count_example():\n\tfor item in count(start=1, step=1):\n\t\tprint(item)\n\ndef itertools_repeat_example():\n\tfor item in repeat(10, 5):\n\t\tprint(3)\n\ndef itertools_chain_example(iterator1, iterator2):\n\tl = []\n\tfor item in chain(iterator1, iterator2):\n\t\tl.append(item)\n\tprint(l)\n\ndef itertools_islice_example(iterator):\n\tl = []\n\tfor item in islice(iterator, 0, 10, 2):\n\t\tl.append(item)\n\tprint(l)\n\ndef itertools_chain_from_iterable_examaple():\n\tl = []\n\tfor item in chain.from_iterable([[2,3,4],[2,5,6]]):\n\t\tl.append(item)\n\tprint(l)\n\ndef itertools_zip_longest():\n\tl1 = ['red', 'orange', 'yellow', 'green', 'blue']\n\tl2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,]\n\tl3 = ['a','b','c']\n\n\tfor item in zip_longest(l1, l2, l3, fillvalue=None):\n\t\tprint(item)\n\niterator = [11,15,2,5,8,10,50,8,2,3,90,80,100]\niterator1 = [0,10,20,30,40,50,60,70,80,90,100,5]\niterator2 = ['a','b','c']\n\n#itertools_false_filter_example(iterator1)\n#itertools_dropwhile_example(iterator1)\n#itertools_cycle_example(iterator1)\n#itertools_count_example()\n#itertools_repeat_example()\n#itertools_chain_example(iterator1, iterator2)\n#itertools_takewhile_example(iterator)\n#itertools_islice_example(iterator)\n#itertools_chain_from_iterable_examaple()\n#itertools_zip_longest()",
"step-ids": [
7,
10,
11,
12,
14
]
}
|
[
7,
10,
11,
12,
14
] |
import os
import numpy as np
import pandas as pd
import random
import platform
import subprocess
import shlex
import teradata
from joblib import dump
import shutil
from tqdm import tqdm
def get_session(db, usr, pwd):
"""Функция устанавливает соединение с ТД и возвращает сессию"""
if platform.system() == 'Windows':
driver = 'Teradata'
else:
driver = 'Teradata Database ODBC Driver 16.20'
udaExec = teradata.UdaExec(appName='DataLoad', version='0.1', logConsole=False)
session = udaExec.connect(method='odbc',
                              system=db, # Teradata server (from file)
                              username=usr, # TD login
                              password=pwd, # TD password
driver = driver,
charset='UTF8',
autoCommit='True',
USEREGIONALSETTINGS='N',
transactionMode = 'TERADATA'
)
return session
def sql2df(query, session, chunksize=100000):
""" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу """
db = pd.read_sql(query, session, chunksize=chunksize)
data = pd.DataFrame()
for x in tqdm(db):
data = pd.concat([data, x])
return data
def check_config():
""" .twbcfg.ini to root path """
path = os.path.expanduser("~")
config_path = os.path.join(path, ".twbcfg.ini")
log_path = os.path.join(path, "tmp", "teradata_logs")
if not os.path.exists(config_path):
if not os.path.exists(log_path):
os.mkdir(log_path)
config = f'''CheckpointDirectory='{log_path}'
LogDirectory='{log_path}' '''
with open(config_path, 'w') as f:
f.write(config)
def td_download(query="",
bd="tdsb15.cgs.sbrf.ru",
username="", password="",
fast=False, return_df=False, csv=True,
chunksize=100000):
"""
    Returns data from Teradata: either a path to a csv file or a DataFrame.
    fast=True - use the Teradata load utilities, False - ODBC;
    return_df - return a DataFrame;
    csv - write the data to a csv file when fast=False;
    chunksize - batch size for ODBC;
    query must contain a where clause so the column names can be read from the DB
"""
local_seed = str(random.randint(0, 1000000))
query = query.replace("\n", " ")
if not fast:
# Teradata python package
session = get_session(bd, username, password)
frame = sql2df(query, session, chunksize=chunksize)
session.close()
if return_df:
return frame
else:
path_to_file = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)
if csv:
filename = path_to_file + ".csv"
frame.to_csv(filename, sep=';', index=False, encoding="utf8")
return filename
else:
dump(frame, path_to_file)
return path_to_file
else:
# FastLoad
check_config()
query = query.replace("'", "''") # prepair query for FastLoad
path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
os.mkdir(path_to_folder)
else:
os.mkdir(path_to_folder)
path_to_file = os.path.join(path_to_folder, 'dataset.csv')
open(path_to_file, 'w').close()
# Create utility files
txt = '''SourceTdpId = '%s'
,SourceUserName = '%s'
,SourceUserPassword = '%s'
,DDLPrivateLogName = 'ddlprivate.log'
,ExportPrivateLogName = 'exportprivate.log'
,TargetErrorList = ['3807']
,TargetFileName = '%s'
,TargetFormat = 'delimited'
,TargetTextDelimiter = ';'
,TargetOpenMode = 'write'
,SelectStmt = '%s' ''' % (bd, username, password, path_to_file, query)
qtxt = '''USING CHAR SET UTF-8
DEFINE JOB qstart2
(
APPLY TO OPERATOR ($FILE_WRITER)
SELECT * FROM OPERATOR($EXPORT);
);'''
with open(path_to_folder + '/qstart2.txt', 'w+') as f:
f.write(qtxt)
with open(path_to_folder + '/jobvars.txt', 'w+') as f:
f.write(txt)
# run FastLoad
# p = subprocess.Popen(
# shlex.split(f"tbuild -f {path_to_folder}/qstart2.txt -v {path_to_folder}/jobvars.txt -j qstart2")
# )
# p.wait()
p = subprocess.run(
shlex.split(f"tbuild -f {path_to_folder}/tdd.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
        # column names
query = query.replace("\n", " ").replace("''","'")
query = query.lower()
query_list = query.split("where")
if len(query_list) == 2:
columns_query = " where 1=0 and ".join(query_list)
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist()
session.close()
else:
print("Coudn't load columns names")
columns_names = None
if not return_df:
if columns_names:
with open(path_to_folder + '/columns_names.txt', 'w') as f:
f.write("\n".join(columns_names))
return path_to_file
else:
if columns_names:
frame = pd.read_csv(path_to_file, names=columns_names, delimiter=';')
else:
frame = pd.read_csv(path_to_file, header=None, delimiter=';')
return frame
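# Minimal td_download usage sketch (placeholder credentials and query; the table name is made up):
# df = td_download(query="select * from SBX_RETAIL_MP_PFM.some_table where 1=1",
#                  bd="tdsb15.cgs.sbrf.ru", username="user", password="pwd",
#                  fast=False, return_df=True)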
def py2td(x):
"""Функция вставляет пропуски и корректирует тип данных под ТД"""
x_type = type(x)
if x_type == float:
if x % 1 == 0:
return int(x)
else:
return x
elif x == 'null':
return None
else:
return x
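# Behaviour sketch for py2td (hand-checked examples):
# py2td(3.0) -> 3, py2td(2.5) -> 2.5, py2td('null') -> None, py2td('abc') -> 'abc'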
def td_import(
username="", password="",
bd="tdsb15.cgs.sbrf.ru", tbl_name="",
schema="SBX_RETAIL_MP_PFM",
loadframe=True, df=None, path_to_file=None, fast=False,
batch_size=12000, max_sessions=6, buffersize=524288,
):
"""
    Writes data to Teradata via the load utilities or ODBC
"""
table = schema + "." + tbl_name
if not fast:
if not loadframe:
            df = pd.read_csv(path_to_file, sep=';', encoding='utf8')
# insert
n_iters = len(df) // batch_size + (len(df) % batch_size > 0)
df_dict = df.to_dict('records')
session = get_session(bd, username, password)
for i in tqdm(range(n_iters), total=n_iters):
session.executemany(
f"INSERT INTO {table} VALUES ({','.join(list('?' * df.shape[1]))})",
[list(row.values()) for row in df_dict[i * batch_size:i * batch_size + batch_size]],
batch=True
)
session.close()
else:
check_config()
local_seed = str(random.randint(0, 1000000))
path_to_folder = os.path.join(os.getcwd(), "data", "output_" + local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
else:
os.mkdir(path_to_folder)
if loadframe:
converted = df.replace(np.NaN, '').astype(str)
path_to_file = path_to_folder + '/tmp.csv'
converted.to_csv(path_to_file, index=False, header=False, sep=";", encoding="utf8")
converted_len = converted.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()
else:
converted_len = pd.read_csv(path_to_file, sep=';', dtype="str", header=None, encoding="utf8",
low_memory=False, nrows=100000)
columns_query = f"select * from {table} where 1=0"
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist()
session.close()
            shutil.copy(path_to_file, path_to_folder + "/tmp.csv") # copy the file so the loader reads a local copy (could be changed to a move)
converted_len.columns = columns_names
converted_len = converted_len.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()
# create empty tmp table
td_temp_table = table + "_tmp_" + local_seed # change schema
session = get_session(bd, username, password)
session.execute(
f"create multiset table {td_temp_table} as {table} with no data no primary index"
)
session.close()
# Create utility file
txt = f"""USING CHARACTER SET UTF8
DEFINE JOB teradata_upload
Description 'Fastload script'
(
DEFINE OPERATOR Load_operator
TYPE LOAD
SCHEMA *
ATTRIBUTES
(
VARCHAR TdPid='{bd}',
VARCHAR UserName='{username}',
VARCHAR UserPassWord='{password}',
VARCHAR TargetTable='{td_temp_table}',
VARCHAR LogTable='{schema}.usr_tpt_log',
VARCHAR DateForm='AnsiDate',
INTEGER MaxSessions={max_sessions}
);
DEFINE SCHEMA Define_Employee_Schema
(
{','.join(f'{key} VARCHAR({max(1, value*2)})' for key, value in converted_len.items())}
);
DEFINE OPERATOR Producer_File_Detail
TYPE DATACONNECTOR PRODUCER
SCHEMA Define_Employee_Schema
ATTRIBUTES
(
VARCHAR DirectoryPath='{path_to_folder}/'
, VARCHAR FileName='tmp.csv'
, VARCHAR TextDelimiter=';'
, VARCHAR QuotedData = 'Optional'
, VARCHAR OpenQuoteMark = '"'
, VARCHAR CloseQuoteMark = '"'
, VARCHAR Format='Delimited'
, VARCHAR OpenMode='Read'
, VARCHAR INDICATORMODE='N'
, INTEGER BUFFERSIZE = {buffersize}
);
APPLY
(
'INSERT INTO {td_temp_table}({','.join(
f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(
f'{key}' for key, value in converted_len.items())});'
)
TO OPERATOR(Load_operator)
SELECT * FROM OPERATOR (Producer_File_Detail);
);"""
with open(path_to_folder + '/load_code.tpt', 'w+') as f:
f.write(txt)
# Start TPT load
p = subprocess.Popen(
shlex.split(f"tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}")
)
p.wait()
# Merge
print("Merging in Teradata... \r", end='', flush=True)
session = get_session(bd, username, password)
session.execute(f"insert into {table} sel * from {td_temp_table}")
session.close()
# Drop temporary table
print("Cleaning... \r", end='', flush=True)
session = get_session(bd, username, password)
session.execute(f"drop table {td_temp_table}")
session.close()
# Cleanup
shutil.rmtree(path_to_folder)
print("Done!")
|
normal
|
{
"blob_id": "a05c94ae0ee41cfef5687f741e07a54ae793e40d",
"index": 2183,
"step-1": "<mask token>\n\n\ndef get_session(db, usr, pwd):\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\n if platform.system() == 'Windows':\n driver = 'Teradata'\n else:\n driver = 'Teradata Database ODBC Driver 16.20'\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',\n logConsole=False)\n session = udaExec.connect(method='odbc', system=db, username=usr,\n password=pwd, driver=driver, charset='UTF8', autoCommit='True',\n USEREGIONALSETTINGS='N', transactionMode='TERADATA')\n return session\n\n\ndef sql2df(query, session, chunksize=100000):\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\n db = pd.read_sql(query, session, chunksize=chunksize)\n data = pd.DataFrame()\n for x in tqdm(db):\n data = pd.concat([data, x])\n return data\n\n\ndef check_config():\n \"\"\" .twbcfg.ini to root path \"\"\"\n path = os.path.expanduser('~')\n config_path = os.path.join(path, '.twbcfg.ini')\n log_path = os.path.join(path, 'tmp', 'teradata_logs')\n if not os.path.exists(config_path):\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n config = (\n f\"CheckpointDirectory='{log_path}' \\n LogDirectory='{log_path}' \"\n )\n with open(config_path, 'w') as f:\n f.write(config)\n\n\n<mask token>\n\n\ndef py2td(x):\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\n x_type = type(x)\n if x_type == float:\n if x % 1 == 0:\n return int(x)\n else:\n return x\n elif x == 'null':\n return None\n else:\n return x\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_session(db, usr, pwd):\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\n if platform.system() == 'Windows':\n driver = 'Teradata'\n else:\n driver = 'Teradata Database ODBC Driver 16.20'\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',\n logConsole=False)\n session = udaExec.connect(method='odbc', system=db, username=usr,\n password=pwd, driver=driver, charset='UTF8', autoCommit='True',\n USEREGIONALSETTINGS='N', transactionMode='TERADATA')\n return session\n\n\ndef sql2df(query, session, chunksize=100000):\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\n db = pd.read_sql(query, session, chunksize=chunksize)\n data = pd.DataFrame()\n for x in tqdm(db):\n data = pd.concat([data, x])\n return data\n\n\ndef check_config():\n \"\"\" .twbcfg.ini to root path \"\"\"\n path = os.path.expanduser('~')\n config_path = os.path.join(path, '.twbcfg.ini')\n log_path = os.path.join(path, 'tmp', 'teradata_logs')\n if not os.path.exists(config_path):\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n config = (\n f\"CheckpointDirectory='{log_path}' \\n LogDirectory='{log_path}' \"\n )\n with open(config_path, 'w') as f:\n f.write(config)\n\n\n<mask token>\n\n\ndef py2td(x):\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\n x_type = type(x)\n if x_type == float:\n if x % 1 == 0:\n return int(x)\n else:\n return x\n elif x == 'null':\n return None\n else:\n return x\n\n\ndef td_import(username='', password='', bd='tdsb15.cgs.sbrf.ru', tbl_name=\n '', schema='SBX_RETAIL_MP_PFM', loadframe=True, df=None, path_to_file=\n None, fast=False, batch_size=12000, max_sessions=6, buffersize=524288):\n \"\"\"\n Функция записывате данные в ТД через утилиты или ODBC\n\n \"\"\"\n table = schema + '.' + tbl_name\n if not fast:\n if not loadframe:\n df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index=\n False)\n n_iters = len(df) // batch_size + (len(df) % batch_size > 0)\n df_dict = df.to_dict('records')\n session = get_session(bd, username, password)\n for i in tqdm(range(n_iters), total=n_iters):\n session.executemany(\n f\"INSERT INTO {table} VALUES ({','.join(list('?' 
* df.shape[1]))})\"\n , [list(row.values()) for row in df_dict[i * batch_size:i *\n batch_size + batch_size]], batch=True)\n session.close()\n else:\n check_config()\n local_seed = str(random.randint(0, 1000000))\n path_to_folder = os.path.join(os.getcwd(), 'data', 'output_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n if loadframe:\n converted = df.replace(np.NaN, '').astype(str)\n path_to_file = path_to_folder + '/tmp.csv'\n converted.to_csv(path_to_file, index=False, header=False, sep=\n ';', encoding='utf8')\n converted_len = converted.apply(lambda x: x.str.encode('utf-8')\n .apply(len)).max().to_dict()\n else:\n converted_len = pd.read_csv(path_to_file, sep=';', dtype='str',\n header=None, encoding='utf8', low_memory=False, nrows=100000)\n columns_query = f'select * from {table} where 1=0'\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n shutil.copy(path_to_file, path_to_folder + '/tmp.csv')\n converted_len.columns = columns_names\n converted_len = converted_len.apply(lambda x: x.str.encode(\n 'utf-8').apply(len)).max().to_dict()\n td_temp_table = table + '_tmp_' + local_seed\n session = get_session(bd, username, password)\n session.execute(\n f'create multiset table {td_temp_table} as {table} with no data no primary index'\n )\n session.close()\n txt = f\"\"\"USING CHARACTER SET UTF8\n DEFINE JOB teradata_upload\n Description 'Fastload script'\n (\n DEFINE OPERATOR Load_operator\n TYPE LOAD\n SCHEMA *\n ATTRIBUTES\n (\n VARCHAR TdPid='{bd}',\n VARCHAR UserName='{username}',\n VARCHAR UserPassWord='{password}',\n VARCHAR TargetTable='{td_temp_table}',\n VARCHAR LogTable='{schema}.usr_tpt_log',\n VARCHAR DateForm='AnsiDate',\n INTEGER MaxSessions={max_sessions}\n );\n\n DEFINE SCHEMA Define_Employee_Schema\n (\n {','.join(f'{key} VARCHAR({max(1, value * 2)})' for key, value in converted_len.items())} \n );\n\n DEFINE OPERATOR Producer_File_Detail\n TYPE DATACONNECTOR PRODUCER\n SCHEMA Define_Employee_Schema\n ATTRIBUTES\n (\n VARCHAR DirectoryPath='{path_to_folder}/'\n , VARCHAR FileName='tmp.csv'\n , VARCHAR TextDelimiter=';'\n , VARCHAR QuotedData = 'Optional'\n , VARCHAR OpenQuoteMark = '\"'\n , VARCHAR CloseQuoteMark = '\"'\n , VARCHAR Format='Delimited'\n , VARCHAR OpenMode='Read'\n , VARCHAR INDICATORMODE='N'\n , INTEGER BUFFERSIZE = {buffersize}\n );\n\n APPLY\n (\n 'INSERT INTO {td_temp_table}({','.join(f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(f'{key}' for key, value in converted_len.items())});'\n )\n TO OPERATOR(Load_operator)\n\n SELECT * FROM OPERATOR (Producer_File_Detail);\n );\"\"\"\n with open(path_to_folder + '/load_code.tpt', 'w+') as f:\n f.write(txt)\n p = subprocess.Popen(shlex.split(\n f'tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}'))\n p.wait()\n print('Merging in Teradata... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'insert into {table} sel * from {td_temp_table}')\n session.close()\n print('Cleaning... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'drop table {td_temp_table}')\n session.close()\n shutil.rmtree(path_to_folder)\n print('Done!')\n",
"step-3": "<mask token>\n\n\ndef get_session(db, usr, pwd):\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\n if platform.system() == 'Windows':\n driver = 'Teradata'\n else:\n driver = 'Teradata Database ODBC Driver 16.20'\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',\n logConsole=False)\n session = udaExec.connect(method='odbc', system=db, username=usr,\n password=pwd, driver=driver, charset='UTF8', autoCommit='True',\n USEREGIONALSETTINGS='N', transactionMode='TERADATA')\n return session\n\n\ndef sql2df(query, session, chunksize=100000):\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\n db = pd.read_sql(query, session, chunksize=chunksize)\n data = pd.DataFrame()\n for x in tqdm(db):\n data = pd.concat([data, x])\n return data\n\n\ndef check_config():\n \"\"\" .twbcfg.ini to root path \"\"\"\n path = os.path.expanduser('~')\n config_path = os.path.join(path, '.twbcfg.ini')\n log_path = os.path.join(path, 'tmp', 'teradata_logs')\n if not os.path.exists(config_path):\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n config = (\n f\"CheckpointDirectory='{log_path}' \\n LogDirectory='{log_path}' \"\n )\n with open(config_path, 'w') as f:\n f.write(config)\n\n\ndef td_download(query='', bd='tdsb15.cgs.sbrf.ru', username='', password='',\n fast=False, return_df=False, csv=True, chunksize=100000):\n \"\"\"\n Функция возвращает данные из ТД: путь к csv или датафрейм.\n\n fast=True - использовать утилиты ТД, False - ODBC;\n return_df - вернуть датафрейм;\n csv - записать данные в файл при fast=False;\n chunksize - размер бача для ODBC;\n query должен содержать where, чтобы выгрузить название столбцов из БД\n\n \"\"\"\n local_seed = str(random.randint(0, 1000000))\n query = query.replace('\\n', ' ')\n if not fast:\n session = get_session(bd, username, password)\n frame = sql2df(query, session, chunksize=chunksize)\n session.close()\n if return_df:\n return frame\n else:\n path_to_file = os.path.join(os.getcwd(), 'data', 'input_' +\n local_seed)\n if csv:\n filename = path_to_file + '.csv'\n frame.to_csv(filename, sep=';', index=False, encoding='utf8')\n return filename\n else:\n dump(frame, path_to_file)\n return path_to_file\n else:\n check_config()\n query = query.replace(\"'\", \"''\")\n path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n os.mkdir(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n path_to_file = os.path.join(path_to_folder, 'dataset.csv')\n open(path_to_file, 'w').close()\n txt = (\n \"\"\"SourceTdpId = '%s'\n ,SourceUserName = '%s' \n ,SourceUserPassword = '%s'\n ,DDLPrivateLogName = 'ddlprivate.log'\n ,ExportPrivateLogName = 'exportprivate.log'\n ,TargetErrorList = ['3807']\n ,TargetFileName = '%s'\n ,TargetFormat = 'delimited'\n ,TargetTextDelimiter = ';'\n ,TargetOpenMode = 'write'\n ,SelectStmt = '%s' \"\"\"\n % (bd, username, password, path_to_file, query))\n qtxt = \"\"\"USING CHAR SET UTF-8\n DEFINE JOB qstart2\n (\n APPLY TO OPERATOR ($FILE_WRITER)\n SELECT * FROM OPERATOR($EXPORT);\n );\"\"\"\n with open(path_to_folder + '/qstart2.txt', 'w+') as f:\n f.write(qtxt)\n with open(path_to_folder + '/jobvars.txt', 'w+') as f:\n f.write(txt)\n p = subprocess.run(shlex.split(\n f'tbuild -f {path_to_folder}/tdd.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}'\n ), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n query = query.replace('\\n', ' ').replace(\"''\", 
\"'\")\n query = query.lower()\n query_list = query.split('where')\n if len(query_list) == 2:\n columns_query = ' where 1=0 and '.join(query_list)\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n else:\n print(\"Coudn't load columns names\")\n columns_names = None\n if not return_df:\n if columns_names:\n with open(path_to_folder + '/columns_names.txt', 'w') as f:\n f.write('\\n'.join(columns_names))\n return path_to_file\n else:\n if columns_names:\n frame = pd.read_csv(path_to_file, names=columns_names,\n delimiter=';')\n else:\n frame = pd.read_csv(path_to_file, header=None, delimiter=';')\n return frame\n\n\ndef py2td(x):\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\n x_type = type(x)\n if x_type == float:\n if x % 1 == 0:\n return int(x)\n else:\n return x\n elif x == 'null':\n return None\n else:\n return x\n\n\ndef td_import(username='', password='', bd='tdsb15.cgs.sbrf.ru', tbl_name=\n '', schema='SBX_RETAIL_MP_PFM', loadframe=True, df=None, path_to_file=\n None, fast=False, batch_size=12000, max_sessions=6, buffersize=524288):\n \"\"\"\n Функция записывате данные в ТД через утилиты или ODBC\n\n \"\"\"\n table = schema + '.' + tbl_name\n if not fast:\n if not loadframe:\n df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index=\n False)\n n_iters = len(df) // batch_size + (len(df) % batch_size > 0)\n df_dict = df.to_dict('records')\n session = get_session(bd, username, password)\n for i in tqdm(range(n_iters), total=n_iters):\n session.executemany(\n f\"INSERT INTO {table} VALUES ({','.join(list('?' * df.shape[1]))})\"\n , [list(row.values()) for row in df_dict[i * batch_size:i *\n batch_size + batch_size]], batch=True)\n session.close()\n else:\n check_config()\n local_seed = str(random.randint(0, 1000000))\n path_to_folder = os.path.join(os.getcwd(), 'data', 'output_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n if loadframe:\n converted = df.replace(np.NaN, '').astype(str)\n path_to_file = path_to_folder + '/tmp.csv'\n converted.to_csv(path_to_file, index=False, header=False, sep=\n ';', encoding='utf8')\n converted_len = converted.apply(lambda x: x.str.encode('utf-8')\n .apply(len)).max().to_dict()\n else:\n converted_len = pd.read_csv(path_to_file, sep=';', dtype='str',\n header=None, encoding='utf8', low_memory=False, nrows=100000)\n columns_query = f'select * from {table} where 1=0'\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n shutil.copy(path_to_file, path_to_folder + '/tmp.csv')\n converted_len.columns = columns_names\n converted_len = converted_len.apply(lambda x: x.str.encode(\n 'utf-8').apply(len)).max().to_dict()\n td_temp_table = table + '_tmp_' + local_seed\n session = get_session(bd, username, password)\n session.execute(\n f'create multiset table {td_temp_table} as {table} with no data no primary index'\n )\n session.close()\n txt = f\"\"\"USING CHARACTER SET UTF8\n DEFINE JOB teradata_upload\n Description 'Fastload script'\n (\n DEFINE OPERATOR Load_operator\n TYPE LOAD\n SCHEMA *\n ATTRIBUTES\n (\n VARCHAR TdPid='{bd}',\n VARCHAR UserName='{username}',\n VARCHAR UserPassWord='{password}',\n VARCHAR TargetTable='{td_temp_table}',\n VARCHAR LogTable='{schema}.usr_tpt_log',\n VARCHAR DateForm='AnsiDate',\n INTEGER MaxSessions={max_sessions}\n );\n\n DEFINE SCHEMA 
Define_Employee_Schema\n (\n {','.join(f'{key} VARCHAR({max(1, value * 2)})' for key, value in converted_len.items())} \n );\n\n DEFINE OPERATOR Producer_File_Detail\n TYPE DATACONNECTOR PRODUCER\n SCHEMA Define_Employee_Schema\n ATTRIBUTES\n (\n VARCHAR DirectoryPath='{path_to_folder}/'\n , VARCHAR FileName='tmp.csv'\n , VARCHAR TextDelimiter=';'\n , VARCHAR QuotedData = 'Optional'\n , VARCHAR OpenQuoteMark = '\"'\n , VARCHAR CloseQuoteMark = '\"'\n , VARCHAR Format='Delimited'\n , VARCHAR OpenMode='Read'\n , VARCHAR INDICATORMODE='N'\n , INTEGER BUFFERSIZE = {buffersize}\n );\n\n APPLY\n (\n 'INSERT INTO {td_temp_table}({','.join(f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(f'{key}' for key, value in converted_len.items())});'\n )\n TO OPERATOR(Load_operator)\n\n SELECT * FROM OPERATOR (Producer_File_Detail);\n );\"\"\"\n with open(path_to_folder + '/load_code.tpt', 'w+') as f:\n f.write(txt)\n p = subprocess.Popen(shlex.split(\n f'tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}'))\n p.wait()\n print('Merging in Teradata... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'insert into {table} sel * from {td_temp_table}')\n session.close()\n print('Cleaning... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'drop table {td_temp_table}')\n session.close()\n shutil.rmtree(path_to_folder)\n print('Done!')\n",
"step-4": "import os\nimport numpy as np\nimport pandas as pd\nimport random\nimport platform\nimport subprocess\nimport shlex\nimport teradata\nfrom joblib import dump\nimport shutil\nfrom tqdm import tqdm\n\n\ndef get_session(db, usr, pwd):\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\n if platform.system() == 'Windows':\n driver = 'Teradata'\n else:\n driver = 'Teradata Database ODBC Driver 16.20'\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',\n logConsole=False)\n session = udaExec.connect(method='odbc', system=db, username=usr,\n password=pwd, driver=driver, charset='UTF8', autoCommit='True',\n USEREGIONALSETTINGS='N', transactionMode='TERADATA')\n return session\n\n\ndef sql2df(query, session, chunksize=100000):\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\n db = pd.read_sql(query, session, chunksize=chunksize)\n data = pd.DataFrame()\n for x in tqdm(db):\n data = pd.concat([data, x])\n return data\n\n\ndef check_config():\n \"\"\" .twbcfg.ini to root path \"\"\"\n path = os.path.expanduser('~')\n config_path = os.path.join(path, '.twbcfg.ini')\n log_path = os.path.join(path, 'tmp', 'teradata_logs')\n if not os.path.exists(config_path):\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n config = (\n f\"CheckpointDirectory='{log_path}' \\n LogDirectory='{log_path}' \"\n )\n with open(config_path, 'w') as f:\n f.write(config)\n\n\ndef td_download(query='', bd='tdsb15.cgs.sbrf.ru', username='', password='',\n fast=False, return_df=False, csv=True, chunksize=100000):\n \"\"\"\n Функция возвращает данные из ТД: путь к csv или датафрейм.\n\n fast=True - использовать утилиты ТД, False - ODBC;\n return_df - вернуть датафрейм;\n csv - записать данные в файл при fast=False;\n chunksize - размер бача для ODBC;\n query должен содержать where, чтобы выгрузить название столбцов из БД\n\n \"\"\"\n local_seed = str(random.randint(0, 1000000))\n query = query.replace('\\n', ' ')\n if not fast:\n session = get_session(bd, username, password)\n frame = sql2df(query, session, chunksize=chunksize)\n session.close()\n if return_df:\n return frame\n else:\n path_to_file = os.path.join(os.getcwd(), 'data', 'input_' +\n local_seed)\n if csv:\n filename = path_to_file + '.csv'\n frame.to_csv(filename, sep=';', index=False, encoding='utf8')\n return filename\n else:\n dump(frame, path_to_file)\n return path_to_file\n else:\n check_config()\n query = query.replace(\"'\", \"''\")\n path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n os.mkdir(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n path_to_file = os.path.join(path_to_folder, 'dataset.csv')\n open(path_to_file, 'w').close()\n txt = (\n \"\"\"SourceTdpId = '%s'\n ,SourceUserName = '%s' \n ,SourceUserPassword = '%s'\n ,DDLPrivateLogName = 'ddlprivate.log'\n ,ExportPrivateLogName = 'exportprivate.log'\n ,TargetErrorList = ['3807']\n ,TargetFileName = '%s'\n ,TargetFormat = 'delimited'\n ,TargetTextDelimiter = ';'\n ,TargetOpenMode = 'write'\n ,SelectStmt = '%s' \"\"\"\n % (bd, username, password, path_to_file, query))\n qtxt = \"\"\"USING CHAR SET UTF-8\n DEFINE JOB qstart2\n (\n APPLY TO OPERATOR ($FILE_WRITER)\n SELECT * FROM OPERATOR($EXPORT);\n );\"\"\"\n with open(path_to_folder + '/qstart2.txt', 'w+') as f:\n f.write(qtxt)\n with open(path_to_folder + '/jobvars.txt', 'w+') as f:\n f.write(txt)\n p = subprocess.run(shlex.split(\n f'tbuild -f 
{path_to_folder}/tdd.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}'\n ), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n query = query.replace('\\n', ' ').replace(\"''\", \"'\")\n query = query.lower()\n query_list = query.split('where')\n if len(query_list) == 2:\n columns_query = ' where 1=0 and '.join(query_list)\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n else:\n print(\"Coudn't load columns names\")\n columns_names = None\n if not return_df:\n if columns_names:\n with open(path_to_folder + '/columns_names.txt', 'w') as f:\n f.write('\\n'.join(columns_names))\n return path_to_file\n else:\n if columns_names:\n frame = pd.read_csv(path_to_file, names=columns_names,\n delimiter=';')\n else:\n frame = pd.read_csv(path_to_file, header=None, delimiter=';')\n return frame\n\n\ndef py2td(x):\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\n x_type = type(x)\n if x_type == float:\n if x % 1 == 0:\n return int(x)\n else:\n return x\n elif x == 'null':\n return None\n else:\n return x\n\n\ndef td_import(username='', password='', bd='tdsb15.cgs.sbrf.ru', tbl_name=\n '', schema='SBX_RETAIL_MP_PFM', loadframe=True, df=None, path_to_file=\n None, fast=False, batch_size=12000, max_sessions=6, buffersize=524288):\n \"\"\"\n Функция записывате данные в ТД через утилиты или ODBC\n\n \"\"\"\n table = schema + '.' + tbl_name\n if not fast:\n if not loadframe:\n df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index=\n False)\n n_iters = len(df) // batch_size + (len(df) % batch_size > 0)\n df_dict = df.to_dict('records')\n session = get_session(bd, username, password)\n for i in tqdm(range(n_iters), total=n_iters):\n session.executemany(\n f\"INSERT INTO {table} VALUES ({','.join(list('?' 
* df.shape[1]))})\"\n , [list(row.values()) for row in df_dict[i * batch_size:i *\n batch_size + batch_size]], batch=True)\n session.close()\n else:\n check_config()\n local_seed = str(random.randint(0, 1000000))\n path_to_folder = os.path.join(os.getcwd(), 'data', 'output_' +\n local_seed)\n if os.path.exists(path_to_folder):\n shutil.rmtree(path_to_folder)\n else:\n os.mkdir(path_to_folder)\n if loadframe:\n converted = df.replace(np.NaN, '').astype(str)\n path_to_file = path_to_folder + '/tmp.csv'\n converted.to_csv(path_to_file, index=False, header=False, sep=\n ';', encoding='utf8')\n converted_len = converted.apply(lambda x: x.str.encode('utf-8')\n .apply(len)).max().to_dict()\n else:\n converted_len = pd.read_csv(path_to_file, sep=';', dtype='str',\n header=None, encoding='utf8', low_memory=False, nrows=100000)\n columns_query = f'select * from {table} where 1=0'\n session = get_session(bd, username, password)\n columns_names = pd.read_sql(columns_query, session).columns.tolist(\n )\n session.close()\n shutil.copy(path_to_file, path_to_folder + '/tmp.csv')\n converted_len.columns = columns_names\n converted_len = converted_len.apply(lambda x: x.str.encode(\n 'utf-8').apply(len)).max().to_dict()\n td_temp_table = table + '_tmp_' + local_seed\n session = get_session(bd, username, password)\n session.execute(\n f'create multiset table {td_temp_table} as {table} with no data no primary index'\n )\n session.close()\n txt = f\"\"\"USING CHARACTER SET UTF8\n DEFINE JOB teradata_upload\n Description 'Fastload script'\n (\n DEFINE OPERATOR Load_operator\n TYPE LOAD\n SCHEMA *\n ATTRIBUTES\n (\n VARCHAR TdPid='{bd}',\n VARCHAR UserName='{username}',\n VARCHAR UserPassWord='{password}',\n VARCHAR TargetTable='{td_temp_table}',\n VARCHAR LogTable='{schema}.usr_tpt_log',\n VARCHAR DateForm='AnsiDate',\n INTEGER MaxSessions={max_sessions}\n );\n\n DEFINE SCHEMA Define_Employee_Schema\n (\n {','.join(f'{key} VARCHAR({max(1, value * 2)})' for key, value in converted_len.items())} \n );\n\n DEFINE OPERATOR Producer_File_Detail\n TYPE DATACONNECTOR PRODUCER\n SCHEMA Define_Employee_Schema\n ATTRIBUTES\n (\n VARCHAR DirectoryPath='{path_to_folder}/'\n , VARCHAR FileName='tmp.csv'\n , VARCHAR TextDelimiter=';'\n , VARCHAR QuotedData = 'Optional'\n , VARCHAR OpenQuoteMark = '\"'\n , VARCHAR CloseQuoteMark = '\"'\n , VARCHAR Format='Delimited'\n , VARCHAR OpenMode='Read'\n , VARCHAR INDICATORMODE='N'\n , INTEGER BUFFERSIZE = {buffersize}\n );\n\n APPLY\n (\n 'INSERT INTO {td_temp_table}({','.join(f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(f'{key}' for key, value in converted_len.items())});'\n )\n TO OPERATOR(Load_operator)\n\n SELECT * FROM OPERATOR (Producer_File_Detail);\n );\"\"\"\n with open(path_to_folder + '/load_code.tpt', 'w+') as f:\n f.write(txt)\n p = subprocess.Popen(shlex.split(\n f'tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}'))\n p.wait()\n print('Merging in Teradata... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'insert into {table} sel * from {td_temp_table}')\n session.close()\n print('Cleaning... \\r', end='', flush=True)\n session = get_session(bd, username, password)\n session.execute(f'drop table {td_temp_table}')\n session.close()\n shutil.rmtree(path_to_folder)\n print('Done!')\n",
"step-5": "import os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport random\r\nimport platform\r\nimport subprocess\r\nimport shlex\r\nimport teradata\r\nfrom joblib import dump\r\nimport shutil\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef get_session(db, usr, pwd):\r\n \"\"\"Функция устанавливает соединение с ТД и возвращает сессию\"\"\"\r\n\r\n if platform.system() == 'Windows':\r\n driver = 'Teradata'\r\n else:\r\n driver = 'Teradata Database ODBC Driver 16.20'\r\n\r\n udaExec = teradata.UdaExec(appName='DataLoad', version='0.1', logConsole=False)\r\n session = udaExec.connect(method='odbc',\r\n system=db, # Сервер ТД из файла\r\n username=usr, # Логин TD\r\n password=pwd, # Пароль TD\r\n driver = driver,\r\n charset='UTF8',\r\n autoCommit='True',\r\n USEREGIONALSETTINGS='N',\r\n transactionMode = 'TERADATA'\r\n )\r\n return session\r\n\r\n\r\ndef sql2df(query, session, chunksize=100000):\r\n \"\"\" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу \"\"\"\r\n db = pd.read_sql(query, session, chunksize=chunksize)\r\n data = pd.DataFrame()\r\n for x in tqdm(db):\r\n data = pd.concat([data, x])\r\n return data\r\n\r\n\r\ndef check_config():\r\n \"\"\" .twbcfg.ini to root path \"\"\"\r\n path = os.path.expanduser(\"~\")\r\n config_path = os.path.join(path, \".twbcfg.ini\")\r\n log_path = os.path.join(path, \"tmp\", \"teradata_logs\")\r\n\r\n if not os.path.exists(config_path):\r\n if not os.path.exists(log_path):\r\n os.mkdir(log_path)\r\n config = f'''CheckpointDirectory='{log_path}' \r\n LogDirectory='{log_path}' '''\r\n with open(config_path, 'w') as f:\r\n f.write(config)\r\n\r\n\r\n\r\ndef td_download(query=\"\",\r\n bd=\"tdsb15.cgs.sbrf.ru\",\r\n username=\"\", password=\"\",\r\n fast=False, return_df=False, csv=True,\r\n chunksize=100000):\r\n \"\"\"\r\n Функция возвращает данные из ТД: путь к csv или датафрейм.\r\n\r\n fast=True - использовать утилиты ТД, False - ODBC;\r\n return_df - вернуть датафрейм;\r\n csv - записать данные в файл при fast=False;\r\n chunksize - размер бача для ODBC;\r\n query должен содержать where, чтобы выгрузить название столбцов из БД\r\n\r\n \"\"\"\r\n local_seed = str(random.randint(0, 1000000))\r\n query = query.replace(\"\\n\", \" \")\r\n\r\n if not fast:\r\n # Teradata python package\r\n session = get_session(bd, username, password)\r\n frame = sql2df(query, session, chunksize=chunksize)\r\n session.close()\r\n if return_df:\r\n return frame\r\n else:\r\n path_to_file = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)\r\n if csv:\r\n filename = path_to_file + \".csv\"\r\n frame.to_csv(filename, sep=';', index=False, encoding=\"utf8\")\r\n return filename\r\n else:\r\n dump(frame, path_to_file)\r\n return path_to_file\r\n else:\r\n # FastLoad\r\n check_config()\r\n query = query.replace(\"'\", \"''\") # prepair query for FastLoad\r\n path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)\r\n\r\n if os.path.exists(path_to_folder):\r\n shutil.rmtree(path_to_folder)\r\n os.mkdir(path_to_folder)\r\n else:\r\n os.mkdir(path_to_folder)\r\n\r\n path_to_file = os.path.join(path_to_folder, 'dataset.csv')\r\n open(path_to_file, 'w').close()\r\n\r\n # Create utility files\r\n txt = '''SourceTdpId = '%s'\r\n ,SourceUserName = '%s' \r\n ,SourceUserPassword = '%s'\r\n ,DDLPrivateLogName = 'ddlprivate.log'\r\n ,ExportPrivateLogName = 'exportprivate.log'\r\n ,TargetErrorList = ['3807']\r\n ,TargetFileName = '%s'\r\n ,TargetFormat = 'delimited'\r\n ,TargetTextDelimiter = ';'\r\n ,TargetOpenMode = 'write'\r\n 
,SelectStmt = '%s' ''' % (bd, username, password, path_to_file, query)\r\n qtxt = '''USING CHAR SET UTF-8\r\n DEFINE JOB qstart2\r\n (\r\n APPLY TO OPERATOR ($FILE_WRITER)\r\n SELECT * FROM OPERATOR($EXPORT);\r\n );'''\r\n with open(path_to_folder + '/qstart2.txt', 'w+') as f:\r\n f.write(qtxt)\r\n with open(path_to_folder + '/jobvars.txt', 'w+') as f:\r\n f.write(txt)\r\n # run FastLoad\r\n# p = subprocess.Popen(\r\n# shlex.split(f\"tbuild -f {path_to_folder}/qstart2.txt -v {path_to_folder}/jobvars.txt -j qstart2\")\r\n# )\r\n# p.wait()\r\n p = subprocess.run(\r\n shlex.split(f\"tbuild -f {path_to_folder}/tdd.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}\"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT\r\n )\r\n\r\n # columns names\r\n query = query.replace(\"\\n\", \" \").replace(\"''\",\"'\")\r\n query = query.lower()\r\n query_list = query.split(\"where\")\r\n if len(query_list) == 2:\r\n columns_query = \" where 1=0 and \".join(query_list)\r\n session = get_session(bd, username, password)\r\n columns_names = pd.read_sql(columns_query, session).columns.tolist()\r\n session.close()\r\n else:\r\n print(\"Coudn't load columns names\")\r\n columns_names = None\r\n\r\n if not return_df:\r\n if columns_names:\r\n with open(path_to_folder + '/columns_names.txt', 'w') as f:\r\n f.write(\"\\n\".join(columns_names))\r\n return path_to_file\r\n else:\r\n if columns_names:\r\n frame = pd.read_csv(path_to_file, names=columns_names, delimiter=';')\r\n else:\r\n frame = pd.read_csv(path_to_file, header=None, delimiter=';')\r\n return frame\r\n\r\n\r\ndef py2td(x):\r\n \"\"\"Функция вставляет пропуски и корректирует тип данных под ТД\"\"\"\r\n x_type = type(x)\r\n if x_type == float:\r\n if x % 1 == 0:\r\n return int(x)\r\n else:\r\n return x\r\n elif x == 'null':\r\n return None\r\n else:\r\n return x\r\n\r\n\r\ndef td_import(\r\n username=\"\", password=\"\",\r\n bd=\"tdsb15.cgs.sbrf.ru\", tbl_name=\"\",\r\n schema=\"SBX_RETAIL_MP_PFM\",\r\n loadframe=True, df=None, path_to_file=None, fast=False,\r\n batch_size=12000, max_sessions=6, buffersize=524288,\r\n):\r\n \"\"\"\r\n Функция записывате данные в ТД через утилиты или ODBC\r\n\r\n \"\"\"\r\n table = schema + \".\" + tbl_name\r\n if not fast:\r\n if not loadframe:\r\n df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index=False)\r\n # insert\r\n n_iters = len(df) // batch_size + (len(df) % batch_size > 0)\r\n df_dict = df.to_dict('records')\r\n session = get_session(bd, username, password)\r\n for i in tqdm(range(n_iters), total=n_iters):\r\n session.executemany(\r\n f\"INSERT INTO {table} VALUES ({','.join(list('?' 
* df.shape[1]))})\",\r\n [list(row.values()) for row in df_dict[i * batch_size:i * batch_size + batch_size]],\r\n batch=True\r\n )\r\n session.close()\r\n else:\r\n check_config()\r\n local_seed = str(random.randint(0, 1000000))\r\n path_to_folder = os.path.join(os.getcwd(), \"data\", \"output_\" + local_seed)\r\n\r\n if os.path.exists(path_to_folder):\r\n shutil.rmtree(path_to_folder)\r\n else:\r\n os.mkdir(path_to_folder)\r\n\r\n if loadframe:\r\n converted = df.replace(np.NaN, '').astype(str)\r\n path_to_file = path_to_folder + '/tmp.csv'\r\n converted.to_csv(path_to_file, index=False, header=False, sep=\";\", encoding=\"utf8\")\r\n converted_len = converted.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()\r\n else:\r\n converted_len = pd.read_csv(path_to_file, sep=';', dtype=\"str\", header=None, encoding=\"utf8\",\r\n low_memory=False, nrows=100000)\r\n columns_query = f\"select * from {table} where 1=0\"\r\n session = get_session(bd, username, password)\r\n columns_names = pd.read_sql(columns_query, session).columns.tolist()\r\n session.close()\r\n shutil.copy(path_to_file, path_to_folder + \"/tmp.csv\") # cp file for correct working Change to move&\r\n\r\n converted_len.columns = columns_names\r\n converted_len = converted_len.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()\r\n\r\n # create empty tmp table\r\n td_temp_table = table + \"_tmp_\" + local_seed # change schema\r\n session = get_session(bd, username, password)\r\n session.execute(\r\n f\"create multiset table {td_temp_table} as {table} with no data no primary index\"\r\n )\r\n session.close()\r\n # Create utility file\r\n txt = f\"\"\"USING CHARACTER SET UTF8\r\n DEFINE JOB teradata_upload\r\n Description 'Fastload script'\r\n (\r\n DEFINE OPERATOR Load_operator\r\n TYPE LOAD\r\n SCHEMA *\r\n ATTRIBUTES\r\n (\r\n VARCHAR TdPid='{bd}',\r\n VARCHAR UserName='{username}',\r\n VARCHAR UserPassWord='{password}',\r\n VARCHAR TargetTable='{td_temp_table}',\r\n VARCHAR LogTable='{schema}.usr_tpt_log',\r\n VARCHAR DateForm='AnsiDate',\r\n INTEGER MaxSessions={max_sessions}\r\n );\r\n\r\n DEFINE SCHEMA Define_Employee_Schema\r\n (\r\n {','.join(f'{key} VARCHAR({max(1, value*2)})' for key, value in converted_len.items())} \r\n );\r\n\r\n DEFINE OPERATOR Producer_File_Detail\r\n TYPE DATACONNECTOR PRODUCER\r\n SCHEMA Define_Employee_Schema\r\n ATTRIBUTES\r\n (\r\n VARCHAR DirectoryPath='{path_to_folder}/'\r\n , VARCHAR FileName='tmp.csv'\r\n , VARCHAR TextDelimiter=';'\r\n , VARCHAR QuotedData = 'Optional'\r\n , VARCHAR OpenQuoteMark = '\"'\r\n , VARCHAR CloseQuoteMark = '\"'\r\n , VARCHAR Format='Delimited'\r\n , VARCHAR OpenMode='Read'\r\n , VARCHAR INDICATORMODE='N'\r\n , INTEGER BUFFERSIZE = {buffersize}\r\n );\r\n\r\n APPLY\r\n (\r\n 'INSERT INTO {td_temp_table}({','.join(\r\n f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(\r\n f'{key}' for key, value in converted_len.items())});'\r\n )\r\n TO OPERATOR(Load_operator)\r\n\r\n SELECT * FROM OPERATOR (Producer_File_Detail);\r\n );\"\"\"\r\n with open(path_to_folder + '/load_code.tpt', 'w+') as f:\r\n f.write(txt)\r\n # Start TPT load\r\n p = subprocess.Popen(\r\n shlex.split(f\"tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}\")\r\n )\r\n p.wait()\r\n # Merge\r\n print(\"Merging in Teradata... \\r\", end='', flush=True)\r\n session = get_session(bd, username, password)\r\n session.execute(f\"insert into {table} sel * from {td_temp_table}\")\r\n session.close()\r\n # Drop temporary table\r\n print(\"Cleaning... 
\\r\", end='', flush=True)\r\n session = get_session(bd, username, password)\r\n session.execute(f\"drop table {td_temp_table}\")\r\n session.close()\r\n # Cleanup\r\n shutil.rmtree(path_to_folder)\r\n print(\"Done!\")\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import matplotlib.pyplot as plotOp
import numpy as np
from random import randint
import re as regexOp
|
flexible
|
{
"blob_id": "6c0a1d4ffd64e0566be53937d9b48975f2530852",
"index": 7767,
"step-1": "<mask token>\n",
"step-2": "import matplotlib.pyplot as plotOp\nimport numpy as np\nfrom random import randint\nimport re as regexOp\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
class player:
def __init__(self, name: str, symbol: str):
self._name = name
self._symbol = symbol
def decide_next_move(self):
"""
Checks all possible combinations to decide best next move
:return: board position
"""
pass
def get_next_move(self):
"""
Asks user for next move
:return: board position
"""
return int(input('Enter your move: '))
|
normal
|
{
"blob_id": "3cc894570189fe545f5db3150d0b69c16dc211dc",
"index": 981,
"step-1": "class player:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "class player:\n\n def __init__(self, name: str, symbol: str):\n self._name = name\n self._symbol = symbol\n <mask token>\n <mask token>\n",
"step-3": "class player:\n\n def __init__(self, name: str, symbol: str):\n self._name = name\n self._symbol = symbol\n <mask token>\n\n def get_next_move(self):\n \"\"\"\n Asks user for next move\n :return: board position\n \"\"\"\n return int(input('Enter your move: '))\n",
"step-4": "class player:\n\n def __init__(self, name: str, symbol: str):\n self._name = name\n self._symbol = symbol\n\n def decide_next_move(self):\n \"\"\"\n Checks all possible combinations to decide best next move\n :return: board position\n \"\"\"\n pass\n\n def get_next_move(self):\n \"\"\"\n Asks user for next move\n :return: board position\n \"\"\"\n return int(input('Enter your move: '))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# -*- coding:utf-8 -*-
import sys
import time
class ProgressBar:
@staticmethod
def progress_test():
bar_length = 100
for percent in range(0, 101):
hashes = '#' * int(percent / 100.0 * bar_length)
spaces = ' ' * (bar_length - len(hashes))
sys.stdout.write("\rPercent: [%s] %d%%" % (hashes + spaces, percent))
sys.stdout.flush()
time.sleep(0.05)
class ProgressBar1:
def __init__(self, width=50):
self.pointer = 0
self.width = width
def __call__(self, x):
# print('\t')
self.pointer = int(self.width * (x / 100.0))
return "|" + "#" * self.pointer + "-" * (self.width - self.pointer) + "| %d %% done" % int(x)
class ProgressBar2:
def __init__(self, width=50):
self.pointer = 0
self.width = width
def __call__(self,x):
# print('\r')
self.pointer = x
return "|" + "#" * self.pointer + "-" * (100 - self.pointer)+ "| %d %% done" % int(x)
@staticmethod
def run():
# progress_test()
ProgressBar.progress_test()
# pb = ProgressBar.ProgressBar1()
# for i in range(101):
# # os.system('cls')
# print(pb(i))
# time.sleep(0.02)
#
# pb = ProgressBar.ProgressBar2()
# for i in range(101):
# # os.system('cls')
# print(pb(i))
# time.sleep(0.02)
if __name__ == '__main__':
ProgressBar.run()
|
normal
|
{
"blob_id": "f928eb34155046107c99db8ded11747d5960c767",
"index": 2527,
"step-1": "<mask token>\n\n\nclass ProgressBar:\n <mask token>\n\n\n class ProgressBar1:\n\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self, x):\n self.pointer = int(self.width * (x / 100.0))\n return '|' + '#' * self.pointer + '-' * (self.width - self.pointer\n ) + '| %d %% done' % int(x)\n\n\n class ProgressBar2:\n\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self, x):\n self.pointer = x\n return '|' + '#' * self.pointer + '-' * (100 - self.pointer\n ) + '| %d %% done' % int(x)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ProgressBar:\n\n @staticmethod\n def progress_test():\n bar_length = 100\n for percent in range(0, 101):\n hashes = '#' * int(percent / 100.0 * bar_length)\n spaces = ' ' * (bar_length - len(hashes))\n sys.stdout.write('\\rPercent: [%s] %d%%' % (hashes + spaces,\n percent))\n sys.stdout.flush()\n time.sleep(0.05)\n\n\n class ProgressBar1:\n\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self, x):\n self.pointer = int(self.width * (x / 100.0))\n return '|' + '#' * self.pointer + '-' * (self.width - self.pointer\n ) + '| %d %% done' % int(x)\n\n\n class ProgressBar2:\n\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self, x):\n self.pointer = x\n return '|' + '#' * self.pointer + '-' * (100 - self.pointer\n ) + '| %d %% done' % int(x)\n\n @staticmethod\n def run():\n ProgressBar.progress_test()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ProgressBar:\n\n @staticmethod\n def progress_test():\n bar_length = 100\n for percent in range(0, 101):\n hashes = '#' * int(percent / 100.0 * bar_length)\n spaces = ' ' * (bar_length - len(hashes))\n sys.stdout.write('\\rPercent: [%s] %d%%' % (hashes + spaces,\n percent))\n sys.stdout.flush()\n time.sleep(0.05)\n\n\n class ProgressBar1:\n\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self, x):\n self.pointer = int(self.width * (x / 100.0))\n return '|' + '#' * self.pointer + '-' * (self.width - self.pointer\n ) + '| %d %% done' % int(x)\n\n\n class ProgressBar2:\n\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self, x):\n self.pointer = x\n return '|' + '#' * self.pointer + '-' * (100 - self.pointer\n ) + '| %d %% done' % int(x)\n\n @staticmethod\n def run():\n ProgressBar.progress_test()\n\n\nif __name__ == '__main__':\n ProgressBar.run()\n",
"step-4": "import sys\nimport time\n\n\nclass ProgressBar:\n\n @staticmethod\n def progress_test():\n bar_length = 100\n for percent in range(0, 101):\n hashes = '#' * int(percent / 100.0 * bar_length)\n spaces = ' ' * (bar_length - len(hashes))\n sys.stdout.write('\\rPercent: [%s] %d%%' % (hashes + spaces,\n percent))\n sys.stdout.flush()\n time.sleep(0.05)\n\n\n class ProgressBar1:\n\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self, x):\n self.pointer = int(self.width * (x / 100.0))\n return '|' + '#' * self.pointer + '-' * (self.width - self.pointer\n ) + '| %d %% done' % int(x)\n\n\n class ProgressBar2:\n\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self, x):\n self.pointer = x\n return '|' + '#' * self.pointer + '-' * (100 - self.pointer\n ) + '| %d %% done' % int(x)\n\n @staticmethod\n def run():\n ProgressBar.progress_test()\n\n\nif __name__ == '__main__':\n ProgressBar.run()\n",
"step-5": "# -*- coding:utf-8 -*-\nimport sys\nimport time\n\n\nclass ProgressBar:\n\n @staticmethod\n def progress_test():\n bar_length = 100\n for percent in range(0, 101):\n hashes = '#' * int(percent / 100.0 * bar_length)\n spaces = ' ' * (bar_length - len(hashes))\n sys.stdout.write(\"\\rPercent: [%s] %d%%\" % (hashes + spaces, percent))\n sys.stdout.flush()\n time.sleep(0.05)\n\n class ProgressBar1:\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self, x):\n # print('\\t')\n self.pointer = int(self.width * (x / 100.0))\n return \"|\" + \"#\" * self.pointer + \"-\" * (self.width - self.pointer) + \"| %d %% done\" % int(x)\n\n class ProgressBar2:\n def __init__(self, width=50):\n self.pointer = 0\n self.width = width\n\n def __call__(self,x):\n # print('\\r')\n self.pointer = x\n return \"|\" + \"#\" * self.pointer + \"-\" * (100 - self.pointer)+ \"| %d %% done\" % int(x)\n\n @staticmethod\n def run():\n # progress_test()\n ProgressBar.progress_test()\n # pb = ProgressBar.ProgressBar1()\n # for i in range(101):\n # # os.system('cls')\n # print(pb(i))\n # time.sleep(0.02)\n #\n # pb = ProgressBar.ProgressBar2()\n # for i in range(101):\n # # os.system('cls')\n # print(pb(i))\n # time.sleep(0.02)\n\n\nif __name__ == '__main__':\n ProgressBar.run()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
''' Word Math
Time: 68ms (~2s), Memory: 29200KB (~256MB)
Category: greedy
'''
import sys
input = sys.stdin.readline
# Input
N = int(input()) # number of words
arr = [list(input().strip()) for _ in range(N)]
# Solution
alphabet = []
for word in arr:
for a in word:
if a not in alphabet:
alphabet.append(a)
value_list = []
for a in alphabet:
value = 0
for word in arr:
        if a not in word: # skip if this letter does not appear in the word
continue
s = ""
for w in word:
s += "1" if w == a else "0"
value += int(s)
value_list.append(value)
value_list.sort(reverse=True) # sort in descending order
answer = 0
value = 9
for s in value_list:
answer += value * s
value -= 1
# Output
print(answer)
|
normal
|
{
"blob_id": "6efc7ff304a05dfc5a7bed7d646e5d6ac034ce85",
"index": 4706,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor word in arr:\n for a in word:\n if a not in alphabet:\n alphabet.append(a)\n<mask token>\nfor a in alphabet:\n value = 0\n for word in arr:\n if a not in word:\n continue\n s = ''\n for w in word:\n s += '1' if w == a else '0'\n value += int(s)\n value_list.append(value)\nvalue_list.sort(reverse=True)\n<mask token>\nfor s in value_list:\n answer += value * s\n value -= 1\nprint(answer)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nN = int(input())\narr = [list(input().strip()) for _ in range(N)]\nalphabet = []\nfor word in arr:\n for a in word:\n if a not in alphabet:\n alphabet.append(a)\nvalue_list = []\nfor a in alphabet:\n value = 0\n for word in arr:\n if a not in word:\n continue\n s = ''\n for w in word:\n s += '1' if w == a else '0'\n value += int(s)\n value_list.append(value)\nvalue_list.sort(reverse=True)\nanswer = 0\nvalue = 9\nfor s in value_list:\n answer += value * s\n value -= 1\nprint(answer)\n",
"step-4": "<mask token>\nimport sys\ninput = sys.stdin.readline\nN = int(input())\narr = [list(input().strip()) for _ in range(N)]\nalphabet = []\nfor word in arr:\n for a in word:\n if a not in alphabet:\n alphabet.append(a)\nvalue_list = []\nfor a in alphabet:\n value = 0\n for word in arr:\n if a not in word:\n continue\n s = ''\n for w in word:\n s += '1' if w == a else '0'\n value += int(s)\n value_list.append(value)\nvalue_list.sort(reverse=True)\nanswer = 0\nvalue = 9\nfor s in value_list:\n answer += value * s\n value -= 1\nprint(answer)\n",
"step-5": "''' 단어 수학\n시간 : 68ms (~2초), 메모리 : 29200KB (~256MB)\n분류 : greedy\n'''\n\nimport sys\ninput = sys.stdin.readline\n\n# 입력\nN = int(input()) # 단어의 개수\narr = [list(input().strip()) for _ in range(N)]\n\n# 풀이\nalphabet = []\nfor word in arr:\n for a in word:\n if a not in alphabet:\n alphabet.append(a)\n\nvalue_list = []\nfor a in alphabet:\n value = 0\n for word in arr:\n if a not in word: # 알파벳 없으면 넘어감\n continue\n\n s = \"\"\n for w in word:\n s += \"1\" if w == a else \"0\"\n value += int(s)\n\n value_list.append(value)\n\nvalue_list.sort(reverse=True) # 내림차순 정렬\n\nanswer = 0\nvalue = 9\nfor s in value_list:\n answer += value * s\n value -= 1\n\n# 출력\nprint(answer)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from hops import constants
class Cluster(object):
"""
Represents a Cluster in Cluster Analysis computed for a featuregroup or training dataset in the featurestore
"""
def __init__(self, cluster_json):
"""
Initialize the cluster object from JSON payload
Args:
:cluster_json: JSON data of the cluster
"""
self.datapoint_name = cluster_json[constants.REST_CONFIG.
JSON_CLUSTERING_ANALYSIS_DATA_POINT_NAME]
self.cluster = int(cluster_json[constants.REST_CONFIG.
JSON_CLUSTERING_ANALYSIS_CLUSTER])
|
normal
|
{
"blob_id": "753c87a3d22aeca1001eb770831b846b175d873e",
"index": 9139,
"step-1": "<mask token>\n\n\nclass Cluster(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Cluster(object):\n <mask token>\n\n def __init__(self, cluster_json):\n \"\"\"\n Initialize the cluster object from JSON payload\n\n Args:\n :cluster_json: JSON data of the cluster\n \"\"\"\n self.datapoint_name = cluster_json[constants.REST_CONFIG.\n JSON_CLUSTERING_ANALYSIS_DATA_POINT_NAME]\n self.cluster = int(cluster_json[constants.REST_CONFIG.\n JSON_CLUSTERING_ANALYSIS_CLUSTER])\n",
"step-3": "<mask token>\n\n\nclass Cluster(object):\n \"\"\"\n Represents a Cluster in Cluster Analysis computed for a featuregroup or training dataset in the featurestore\n \"\"\"\n\n def __init__(self, cluster_json):\n \"\"\"\n Initialize the cluster object from JSON payload\n\n Args:\n :cluster_json: JSON data of the cluster\n \"\"\"\n self.datapoint_name = cluster_json[constants.REST_CONFIG.\n JSON_CLUSTERING_ANALYSIS_DATA_POINT_NAME]\n self.cluster = int(cluster_json[constants.REST_CONFIG.\n JSON_CLUSTERING_ANALYSIS_CLUSTER])\n",
"step-4": "from hops import constants\n\n\nclass Cluster(object):\n \"\"\"\n Represents a Cluster in Cluster Analysis computed for a featuregroup or training dataset in the featurestore\n \"\"\"\n\n def __init__(self, cluster_json):\n \"\"\"\n Initialize the cluster object from JSON payload\n\n Args:\n :cluster_json: JSON data of the cluster\n \"\"\"\n self.datapoint_name = cluster_json[constants.REST_CONFIG.\n JSON_CLUSTERING_ANALYSIS_DATA_POINT_NAME]\n self.cluster = int(cluster_json[constants.REST_CONFIG.\n JSON_CLUSTERING_ANALYSIS_CLUSTER])\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MoleculeDriver(enum.Enum):
docker = 1
lxd = 2
vagrant = 3
class TestPlatform(enum.Enum):
linux = 1
ubuntu = 2
centos = 3
<|reserved_special_token_0|>
def print_sub_header(sub_header_text):
print(colorama.Fore.CYAN + colorama.Style.BRIGHT + '--' +
f' {sub_header_text} '.ljust(78, '-') + colorama.Style.RESET_ALL)
def print_success_message(success_message_text):
print(colorama.Fore.GREEN + colorama.Style.BRIGHT +
f' {success_message_text}: Success '.center(80, '=') + colorama.
Style.RESET_ALL)
<|reserved_special_token_0|>
def get_base_config_path(driver_code, platform_code):
base_config = 'molecule/molecule_base_{driver}_{platform}.yml'.format(
driver=driver_code.name, platform=platform_code.name)
return str(pathlib.Path(__file__).resolve().parent / base_config)
def get_molecule_scenarios(context):
scenarios = []
for child_obj in (pathlib.Path.cwd() / 'molecule').iterdir():
if child_obj.is_dir():
if (child_obj / 'molecule.yml').exists():
scenarios.append(child_obj.name)
return sorted(scenarios)
<|reserved_special_token_0|>
def get_parameter_value(host, ansible_var_name, param_value, default_value):
if host.backend.HAS_RUN_ANSIBLE:
ansible_var_value = host.ansible.get_variables().get(ansible_var_name,
None)
else:
ansible_var_value = None
return_value = ansible_var_value if param_value is None else param_value
if return_value is None:
return_value = default_value
return return_value
def get_github_release_info(release_url):
if 'AO_GITHUB_OAUTH_TOKEN' in os.environ:
headers = {'Authorization': 'token ' + os.environ[
'AO_GITHUB_OAUTH_TOKEN']}
else:
headers = None
return requests.get('https://api.github.com/repos/' + release_url,
headers=headers).json()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MoleculeDriver(enum.Enum):
docker = 1
lxd = 2
vagrant = 3
class TestPlatform(enum.Enum):
linux = 1
ubuntu = 2
centos = 3
<|reserved_special_token_0|>
def print_sub_header(sub_header_text):
print(colorama.Fore.CYAN + colorama.Style.BRIGHT + '--' +
f' {sub_header_text} '.ljust(78, '-') + colorama.Style.RESET_ALL)
def print_success_message(success_message_text):
print(colorama.Fore.GREEN + colorama.Style.BRIGHT +
f' {success_message_text}: Success '.center(80, '=') + colorama.
Style.RESET_ALL)
<|reserved_special_token_0|>
def get_base_config_path(driver_code, platform_code):
base_config = 'molecule/molecule_base_{driver}_{platform}.yml'.format(
driver=driver_code.name, platform=platform_code.name)
return str(pathlib.Path(__file__).resolve().parent / base_config)
def get_molecule_scenarios(context):
scenarios = []
for child_obj in (pathlib.Path.cwd() / 'molecule').iterdir():
if child_obj.is_dir():
if (child_obj / 'molecule.yml').exists():
scenarios.append(child_obj.name)
return sorted(scenarios)
def run_molecule(context, command, scenario, driver, platform='linux', env={}):
driver_code = MoleculeDriver[driver.lower()]
platform_code = TestPlatform[platform.lower()]
molecule_env = env.copy()
if driver_code == MoleculeDriver.lxd:
molecule_env.update({'MOLECULE_USER_NAME': 'root'})
elif driver_code == MoleculeDriver.vagrant:
molecule_env.update({'MOLECULE_USER_NAME': 'vagrant'})
molecule_command = (
f'molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}'
)
if scenario is not None:
molecule_command += f' -s {scenario}'
run_command(context, molecule_command, env=molecule_env, echo=True)
def get_parameter_value(host, ansible_var_name, param_value, default_value):
if host.backend.HAS_RUN_ANSIBLE:
ansible_var_value = host.ansible.get_variables().get(ansible_var_name,
None)
else:
ansible_var_value = None
return_value = ansible_var_value if param_value is None else param_value
if return_value is None:
return_value = default_value
return return_value
def get_github_release_info(release_url):
if 'AO_GITHUB_OAUTH_TOKEN' in os.environ:
headers = {'Authorization': 'token ' + os.environ[
'AO_GITHUB_OAUTH_TOKEN']}
else:
headers = None
return requests.get('https://api.github.com/repos/' + release_url,
headers=headers).json()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MoleculeDriver(enum.Enum):
docker = 1
lxd = 2
vagrant = 3
class TestPlatform(enum.Enum):
linux = 1
ubuntu = 2
centos = 3
def print_header(header_text):
print(colorama.Fore.CYAN + colorama.Style.BRIGHT + f' {header_text} '.
center(80, '=') + colorama.Style.RESET_ALL)
def print_sub_header(sub_header_text):
print(colorama.Fore.CYAN + colorama.Style.BRIGHT + '--' +
f' {sub_header_text} '.ljust(78, '-') + colorama.Style.RESET_ALL)
def print_success_message(success_message_text):
print(colorama.Fore.GREEN + colorama.Style.BRIGHT +
f' {success_message_text}: Success '.center(80, '=') + colorama.
Style.RESET_ALL)
def run_command(context, *args, **kwargs):
try:
return context.run(*args, **kwargs)
except invoke.exceptions.Failure:
print(colorama.Fore.RED + colorama.Style.BRIGHT +
"Failure: error executing '" + args[0] + "' command" + colorama
.Style.RESET_ALL)
raise
def get_base_config_path(driver_code, platform_code):
base_config = 'molecule/molecule_base_{driver}_{platform}.yml'.format(
driver=driver_code.name, platform=platform_code.name)
return str(pathlib.Path(__file__).resolve().parent / base_config)
def get_molecule_scenarios(context):
scenarios = []
for child_obj in (pathlib.Path.cwd() / 'molecule').iterdir():
if child_obj.is_dir():
if (child_obj / 'molecule.yml').exists():
scenarios.append(child_obj.name)
return sorted(scenarios)
def run_molecule(context, command, scenario, driver, platform='linux', env={}):
driver_code = MoleculeDriver[driver.lower()]
platform_code = TestPlatform[platform.lower()]
molecule_env = env.copy()
if driver_code == MoleculeDriver.lxd:
molecule_env.update({'MOLECULE_USER_NAME': 'root'})
elif driver_code == MoleculeDriver.vagrant:
molecule_env.update({'MOLECULE_USER_NAME': 'vagrant'})
molecule_command = (
f'molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}'
)
if scenario is not None:
molecule_command += f' -s {scenario}'
run_command(context, molecule_command, env=molecule_env, echo=True)
def get_parameter_value(host, ansible_var_name, param_value, default_value):
if host.backend.HAS_RUN_ANSIBLE:
ansible_var_value = host.ansible.get_variables().get(ansible_var_name,
None)
else:
ansible_var_value = None
return_value = ansible_var_value if param_value is None else param_value
if return_value is None:
return_value = default_value
return return_value
def get_github_release_info(release_url):
if 'AO_GITHUB_OAUTH_TOKEN' in os.environ:
headers = {'Authorization': 'token ' + os.environ[
'AO_GITHUB_OAUTH_TOKEN']}
else:
headers = None
return requests.get('https://api.github.com/repos/' + release_url,
headers=headers).json()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=DeprecationWarning)
import invoke
class MoleculeDriver(enum.Enum):
docker = 1
lxd = 2
vagrant = 3
class TestPlatform(enum.Enum):
linux = 1
ubuntu = 2
centos = 3
def print_header(header_text):
print(colorama.Fore.CYAN + colorama.Style.BRIGHT + f' {header_text} '.
center(80, '=') + colorama.Style.RESET_ALL)
def print_sub_header(sub_header_text):
print(colorama.Fore.CYAN + colorama.Style.BRIGHT + '--' +
f' {sub_header_text} '.ljust(78, '-') + colorama.Style.RESET_ALL)
def print_success_message(success_message_text):
print(colorama.Fore.GREEN + colorama.Style.BRIGHT +
f' {success_message_text}: Success '.center(80, '=') + colorama.
Style.RESET_ALL)
def run_command(context, *args, **kwargs):
try:
return context.run(*args, **kwargs)
except invoke.exceptions.Failure:
print(colorama.Fore.RED + colorama.Style.BRIGHT +
"Failure: error executing '" + args[0] + "' command" + colorama
.Style.RESET_ALL)
raise
def get_base_config_path(driver_code, platform_code):
base_config = 'molecule/molecule_base_{driver}_{platform}.yml'.format(
driver=driver_code.name, platform=platform_code.name)
return str(pathlib.Path(__file__).resolve().parent / base_config)
def get_molecule_scenarios(context):
scenarios = []
for child_obj in (pathlib.Path.cwd() / 'molecule').iterdir():
if child_obj.is_dir():
if (child_obj / 'molecule.yml').exists():
scenarios.append(child_obj.name)
return sorted(scenarios)
def run_molecule(context, command, scenario, driver, platform='linux', env={}):
driver_code = MoleculeDriver[driver.lower()]
platform_code = TestPlatform[platform.lower()]
molecule_env = env.copy()
if driver_code == MoleculeDriver.lxd:
molecule_env.update({'MOLECULE_USER_NAME': 'root'})
elif driver_code == MoleculeDriver.vagrant:
molecule_env.update({'MOLECULE_USER_NAME': 'vagrant'})
molecule_command = (
f'molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}'
)
if scenario is not None:
molecule_command += f' -s {scenario}'
run_command(context, molecule_command, env=molecule_env, echo=True)
def get_parameter_value(host, ansible_var_name, param_value, default_value):
if host.backend.HAS_RUN_ANSIBLE:
ansible_var_value = host.ansible.get_variables().get(ansible_var_name,
None)
else:
ansible_var_value = None
return_value = ansible_var_value if param_value is None else param_value
if return_value is None:
return_value = default_value
return return_value
def get_github_release_info(release_url):
if 'AO_GITHUB_OAUTH_TOKEN' in os.environ:
headers = {'Authorization': 'token ' + os.environ[
'AO_GITHUB_OAUTH_TOKEN']}
else:
headers = None
return requests.get('https://api.github.com/repos/' + release_url,
headers=headers).json()
<|reserved_special_token_1|>
import os
import pathlib
import enum
import warnings
import colorama
import requests
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import invoke
class MoleculeDriver(enum.Enum):
docker = 1
lxd = 2
vagrant = 3
class TestPlatform(enum.Enum):
linux = 1
ubuntu = 2
centos = 3
def print_header(header_text):
print(
colorama.Fore.CYAN + colorama.Style.BRIGHT +
f" {header_text} ".center(80, "=") +
colorama.Style.RESET_ALL
)
def print_sub_header(sub_header_text):
print(
colorama.Fore.CYAN + colorama.Style.BRIGHT + "--" +
f" {sub_header_text} ".ljust(78, "-") +
colorama.Style.RESET_ALL
)
def print_success_message(success_message_text):
print(
colorama.Fore.GREEN + colorama.Style.BRIGHT +
f" {success_message_text}: Success ".center(80, "=") +
colorama.Style.RESET_ALL
)
def run_command(context, *args, **kwargs):
try:
return context.run(*args, **kwargs)
except invoke.exceptions.Failure:
print(
colorama.Fore.RED + colorama.Style.BRIGHT +
"Failure: error executing '" + args[0] + "' command" +
colorama.Style.RESET_ALL
)
raise
def get_base_config_path(driver_code, platform_code):
base_config = "molecule/molecule_base_{driver}_{platform}.yml".format(
driver=driver_code.name, platform=platform_code.name
)
return str(pathlib.Path(__file__).resolve().parent / base_config)
def get_molecule_scenarios(context):
scenarios = []
for child_obj in (pathlib.Path.cwd() / "molecule").iterdir():
if child_obj.is_dir():
if (child_obj / "molecule.yml").exists():
scenarios.append(child_obj.name)
return sorted(scenarios)
def run_molecule(context, command, scenario, driver, platform="linux", env={}):
driver_code = MoleculeDriver[driver.lower()]
platform_code = TestPlatform[platform.lower()]
molecule_env = env.copy()
if driver_code == MoleculeDriver.lxd:
molecule_env.update({"MOLECULE_USER_NAME": "root"})
elif driver_code == MoleculeDriver.vagrant:
molecule_env.update({"MOLECULE_USER_NAME": "vagrant"})
molecule_command = (
f"molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}"
)
if scenario is not None:
molecule_command += f" -s {scenario}"
run_command(context, molecule_command, env=molecule_env, echo=True)
def get_parameter_value(host, ansible_var_name, param_value, default_value):
if host.backend.HAS_RUN_ANSIBLE:
ansible_var_value = host.ansible.get_variables().get(ansible_var_name, None)
else:
ansible_var_value = None
return_value = ansible_var_value if param_value is None else param_value
if return_value is None:
return_value = default_value
return return_value
def get_github_release_info(release_url):
if "AO_GITHUB_OAUTH_TOKEN" in os.environ:
headers = {"Authorization": "token " + os.environ["AO_GITHUB_OAUTH_TOKEN"]}
else:
headers = None
return requests.get(
"https://api.github.com/repos/" + release_url, headers=headers
).json()
"""
This file is part of the tractor library.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jan 06, 2012.
"""
from StringIO import StringIO
from datetime import datetime
from tractor.attachment import AttachmentWrapper
from tractor.attachment import Base64Converter
from tractor.tests.base import BaseTestCase
from xmlrpclib import Binary
import zipfile


class Base64ConverterTestCase(BaseTestCase):
def test_encode_string(self):
test_str = 'This is a string for base64 conversion testing.'
exp_conv = Binary(test_str)
self.assert_equal(Base64Converter.encode_string(test_str), exp_conv)
def test_encode_stream(self):
test_stream = StringIO('This is a stream for base64 conversion testing.')
exp_conv = Binary(test_stream.read())
self.assert_equal(Base64Converter.encode_stream(test_stream), exp_conv)
test_stream.close()
def test_encode_zip_stream(self):
zip_stream = StringIO()
archive = zipfile.ZipFile(zip_stream, 'a', zipfile.ZIP_DEFLATED, False)
archive.writestr('file1', 'test stream 1')
archive.writestr('file2', 'test stream 2')
for zfile in archive.filelist: zfile.create_system = 0
archive.close()
zip_stream.seek(0)
exp_conv = Binary(zip_stream.getvalue())
self.assert_equal(Base64Converter.encode_zip_stream(zip_stream),
exp_conv)
zip_stream.close()
def test_decode_string(self):
test_str = 'This is a string for base64 conversion testing.'
conv = Base64Converter.encode_string(test_str)
self.assert_equal(Base64Converter.decode_to_string(conv), test_str)
def test_decode_stream(self):
test_stream = StringIO('This is a stream for base64 conversion testing.')
conv = Base64Converter.encode_stream(test_stream)
decoded_conv = Base64Converter.decode_to_stream(conv)
decoded_cont = decoded_conv.read()
test_stream.seek(0)
exp_cont = test_stream.read()
self.assert_equal(decoded_cont, exp_cont)
def test_decode_zip_file_data(self):
zip_stream = StringIO()
archive = zipfile.ZipFile(zip_stream, 'a', zipfile.ZIP_DEFLATED, False)
archive.writestr('file1', 'test stream 1')
archive.writestr('file2', 'test stream 2')
for zfile in archive.filelist: zfile.create_system = 0
archive.close()
zip_stream.seek(0)
conv = Base64Converter.encode_zip_stream(zip_stream)
decoded_conv = Base64Converter.decode_to_stream(conv)
ret_archive = zipfile.ZipFile(decoded_conv, 'a', zipfile.ZIP_DEFLATED,
False)
content1 = None
content2 = None
self.assert_equal(len(ret_archive.namelist()), 2)
for file_name in ret_archive.namelist():
if file_name == 'file1':
content1 = ret_archive.read(file_name)
self.assert_equal(content1, 'test stream 1')
self.assert_not_equal(content2, 'test stream 2')
else:
content2 = ret_archive.read(file_name)
self.assert_equal(content2, 'test stream 2')
                self.assert_not_equal(content2, 'test stream 1')


class AttachmentTestCase(BaseTestCase):
def set_up(self):
BaseTestCase.set_up(self)
self.init_data = dict(content='Important attachment content.',
file_name='test_file1.txt',
description='A test file.',
size=14,
author='user1',
time=None)
def test_init(self):
att = AttachmentWrapper(**self.init_data)
for attr_name, exp_value in self.init_data.iteritems():
self.assert_equal(getattr(att, attr_name), exp_value)
def test_create_from_trac_data(self):
file_name = 'test_file1.txt'
description = 'A test file.'
size = len(file_name)
time = datetime
author = 'user1'
trac_data = (file_name, description, size, time, author)
att = AttachmentWrapper.create_from_trac_data(trac_data)
self.init_data['content'] = None
self.init_data['time'] = time
for attr_name, exp_value in self.init_data.iteritems():
self.assert_equal(getattr(att, attr_name), exp_value)
def test_get_base64_data_for_upload(self):
# Test string
test_str = 'This is a string for base64 conversion testing.'
self.init_data['content'] = test_str
exp_conv = Base64Converter.encode_string(test_str)
att = AttachmentWrapper(**self.init_data)
self.assert_equal(att.get_base64_data_for_upload(), exp_conv)
# Test stream
test_stream = StringIO('This is a stream for base64 conversion testing.')
exp_conv = Base64Converter.encode_stream(test_stream)
self.init_data['content'] = test_stream
att = AttachmentWrapper(**self.init_data)
self.assert_equal(att.get_base64_data_for_upload(), exp_conv)
# Test file map
file_map = dict(file1='test stream 1', file2='test stream 2')
zip_stream = StringIO()
archive = zipfile.ZipFile(zip_stream, 'a', zipfile.ZIP_DEFLATED, False)
for fn, content in file_map.iteritems(): archive.writestr(fn, content)
for zfile in archive.filelist: zfile.create_system = 0
archive.close()
zip_stream.seek(0)
exp_conv = Base64Converter.encode_zip_stream(zip_stream)
self.init_data['content'] = file_map
att = AttachmentWrapper(**self.init_data)
self.assert_equal(att.get_base64_data_for_upload(), exp_conv)
# Test error raising
self.init_data['content'] = 1
att = AttachmentWrapper(**self.init_data)
self.assert_raises(TypeError, att.get_base64_data_for_upload)
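

# --- Minimal usage sketch (assumption, not part of this test module) ---------
# Illustrates how the AttachmentWrapper/Base64Converter API exercised above
# might be used to prepare an attachment payload. Only calls that appear in the
# tests are used; the surrounding upload workflow is hypothetical.
from StringIO import StringIO
from tractor.attachment import AttachmentWrapper

att = AttachmentWrapper(content=StringIO('report body'),
                        file_name='report.txt',
                        description='Nightly report',
                        size=11,
                        author='user1',
                        time=None)
binary_payload = att.get_base64_data_for_upload()  # an xmlrpclib.Binary instance
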
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <[email protected]>
#
# Distributed under terms of the MIT license.
"""
You are given coins of different denominations and a total amount of money. Write a function to compute the number of combinations that make up that amount. You may assume that you have infinite number of each kind of coin.
Example 1:
Input: amount = 5, coins = [1, 2, 5]
Output: 4
Explanation: there are four ways to make up the amount:
5=5
5=2+2+1
5=2+1+1+1
5=1+1+1+1+1
Example 2:
Input: amount = 3, coins = [2]
Output: 0
Explanation: the amount of 3 cannot be made up just with coins of 2.
Example 3:
Input: amount = 10, coins = [10]
Output: 1
Note:
You can assume that
1. 0 <= amount <= 5000
2. 1 <= coin <= 5000
3. the number of coins is less than 500
4. the answer is guaranteed to fit into signed 32-bit integer
"""
import sys
from functools import lru_cache
from typing import List
import pytest


class Solution:
def change(self, amount: int, coins: List[int]) -> int:
coins = sorted(coins, reverse=True)
@lru_cache(None)
def rec(i, amount):
if i == len(coins):
return 1 if amount == 0 else 0
return sum(rec(i+1, amount-c) for c in range(0, amount+1, coins[i]))
        return rec(0, amount)


@pytest.mark.parametrize('amount, coins, expected', [
(5, [1,2,5], 4),
(3, [2], 0),
(10, [10], 1),
])
def test(amount, coins, expected):
    assert expected == Solution().change(amount, coins)


if __name__ == '__main__':
sys.exit(pytest.main(["-s", "-v"] + sys.argv))
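

# --- Supplementary note (not part of the original solution) ------------------
# The memoized recursion above counts each coin multiset exactly once because
# coin i is fully consumed before moving on to coin i+1. The same count can be
# obtained bottom-up in O(amount * len(coins)) time and O(amount) space; this
# sketch is added for illustration and mirrors the examples in the docstring.
def change_iterative(amount, coins):
    dp = [0] * (amount + 1)
    dp[0] = 1                      # one way to make 0: use no coins
    for coin in coins:             # fixing the coin order avoids double counting
        for a in range(coin, amount + 1):
            dp[a] += dp[a - coin]
    return dp[amount]

assert change_iterative(5, [1, 2, 5]) == 4
assert change_iterative(3, [2]) == 0
assert change_iterative(10, [10]) == 1
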
<|reserved_special_token_0|>
class InowasFlopyCalculationAdapter:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, version, data, uuid):
self._mf_data = data.get('mf')
self._mt_data = data.get('mt')
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get('write_input'):
self.write_input_model(self._mf)
if self._mf_data.get('run_model'):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get('write_input'):
self.write_input_model(self._mt)
if self._mt_data.get('run_model'):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data['packages']:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
<|reserved_special_token_0|>
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
<|reserved_special_token_0|>
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InowasFlopyCalculationAdapter:
<|reserved_special_token_0|>
_version = None
_uuid = None
_mf = None
_mt = None
_report = ''
mf_package_order = ['mf', 'dis', 'bas', 'bas6', 'riv', 'wel', 'rch',
'chd', 'ghb', 'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6']
mt_package_order = ['mt', 'btn', 'adv', 'dsp', 'gcg', 'ssm', 'lkt',
'phc', 'rct', 'sft', 'tob', 'uzt']
def __init__(self, version, data, uuid):
self._mf_data = data.get('mf')
self._mt_data = data.get('mt')
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get('write_input'):
self.write_input_model(self._mf)
if self._mf_data.get('run_model'):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get('write_input'):
self.write_input_model(self._mt)
if self._mt_data.get('run_model'):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data['packages']:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
def create_model(self, package_order, package_content):
for package in package_order:
if package in package_content:
print('Create Flopy Package: %s' % package)
self.create_package(package, package_content[package])
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
@staticmethod
def run_model(model):
print('Run the %s model' % model)
print(model.namefile)
print(model.exe_name)
success, report = model.run_model(report=True, silent=True)
return ' \n'.join(str(e) for e in report + [success])
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
def response_message(self):
return self._report
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class InowasFlopyCalculationAdapter:
"""The Flopy Class"""
_version = None
_uuid = None
_mf = None
_mt = None
_report = ''
mf_package_order = ['mf', 'dis', 'bas', 'bas6', 'riv', 'wel', 'rch',
'chd', 'ghb', 'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6']
mt_package_order = ['mt', 'btn', 'adv', 'dsp', 'gcg', 'ssm', 'lkt',
'phc', 'rct', 'sft', 'tob', 'uzt']
def __init__(self, version, data, uuid):
self._mf_data = data.get('mf')
self._mt_data = data.get('mt')
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get('write_input'):
self.write_input_model(self._mf)
if self._mf_data.get('run_model'):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get('write_input'):
self.write_input_model(self._mt)
if self._mt_data.get('run_model'):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data['packages']:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
def create_model(self, package_order, package_content):
for package in package_order:
if package in package_content:
print('Create Flopy Package: %s' % package)
self.create_package(package, package_content[package])
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
@staticmethod
def run_model(model):
print('Run the %s model' % model)
print(model.namefile)
print(model.exe_name)
success, report = model.run_model(report=True, silent=True)
return ' \n'.join(str(e) for e in report + [success])
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
def response_message(self):
return self._report
<|reserved_special_token_1|>
<|reserved_special_token_0|>
"""
This module is an intermediate layer between flopy version 3.2
and the inowas-modflow-configuration format.
Author: Ralf Junghanns
EMail: [email protected]
"""
from .BasAdapter import BasAdapter
from .ChdAdapter import ChdAdapter
from .DisAdapter import DisAdapter
from .GhbAdapter import GhbAdapter
from .LpfAdapter import LpfAdapter
from .MfAdapter import MfAdapter
from .NwtAdapter import NwtAdapter
from .OcAdapter import OcAdapter
from .PcgAdapter import PcgAdapter
from .RchAdapter import RchAdapter
from .RivAdapter import RivAdapter
from .ReadBudget import ReadBudget
from .ReadDrawdown import ReadDrawdown
from .ReadHead import ReadHead
from .UpwAdapter import UpwAdapter
from .WelAdapter import WelAdapter
from .LmtAdapter import LmtAdapter
from .MtAdapter import MtAdapter
from .AdvAdapter import AdvAdapter
from .BtnAdapter import BtnAdapter
from .DspAdapter import DspAdapter
from .GcgAdapter import GcgAdapter
from .LktAdapter import LktAdapter
from .PhcAdapter import PhcAdapter
from .RctAdapter import RctAdapter
from .SftAdapter import SftAdapter
from .SsmAdapter import SsmAdapter
from .TobAdapter import TobAdapter
from .UztAdapter import UztAdapter
class InowasFlopyCalculationAdapter:
    """Builds, writes and runs MODFLOW/MT3D models with flopy from an inowas-modflow-configuration dict."""
_version = None
_uuid = None
_mf = None
_mt = None
_report = ''
mf_package_order = [
'mf', 'dis', 'bas', 'bas6',
'riv', 'wel', 'rch', 'chd', 'ghb',
'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6'
]
mt_package_order = [
"mt", "btn", "adv", "dsp", "gcg", "ssm", "lkt",
"phc", "rct", "sft", "tob", "uzt"
]
def __init__(self, version, data, uuid):
self._mf_data = data.get("mf")
self._mt_data = data.get("mt")
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get("write_input"):
self.write_input_model(self._mf)
if self._mf_data.get("run_model"):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get("write_input"):
self.write_input_model(self._mt)
if self._mt_data.get("run_model"):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data["packages"]:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
def create_model(self, package_order, package_content):
for package in package_order:
if package in package_content:
print('Create Flopy Package: %s' % package)
self.create_package(package, package_content[package])
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
@staticmethod
def run_model(model):
print('Run the %s model' % model)
print(model.namefile)
print(model.exe_name)
success, report = model.run_model(report=True, silent=True)
return ' \n'.join(str(e) for e in report + [success])
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
        # MODFLOW packages
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
# MT3D packages
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
def response_message(self):
return self._report
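
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It shows the rough
# shape of the configuration dict the adapter expects; the per-package
# payloads below are placeholders whose real contents depend on the individual
# *Adapter classes, so this is illustrative rather than directly runnable.
#
# config = {
#     'mf': {
#         'packages': ['mf', 'dis', 'bas', 'pcg', 'oc'],
#         'mf': {'model_ws': './model', ...},   # 'model_ws' is read back by response()
#         'dis': {...}, 'bas': {...}, 'pcg': {...}, 'oc': {...},
#         'write_input': True,    # triggers write_input_model()
#         'run_model': True,      # triggers run_model() and fills the report
#     }
# }
# adapter = InowasFlopyCalculationAdapter('3.2', config, 'example-uuid')
# print(adapter.response())
# print(adapter.response_message())
# ---------------------------------------------------------------------------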
"""
In search.py, you will implement generic search algorithms which are called
by Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other
maze, the sequence of moves will be incorrect, so only use this for tinyMaze
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s,s,w,s,w,w,s,w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first
Your search algorithm needs to return a list of actions that reaches
the goal. Make sure to implement a graph search algorithm
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print("Start:", problem.getStartState())
print("Is the start a goal?", problem.isGoalState(problem.getStartState()))
print("Start's successors:", problem.getSuccessors(problem.getStartState()))
"""
"*** YOUR CODE HERE ***"
# Frontier stored in a Stack
frontier = util.Stack()
# Visited states stored in a list
visitedStates = []
# Format of each element: (current coordinates, [path taken to get there])
frontier.push((problem.getStartState(), []))
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
def breadthFirstSearch(problem):
"""
Search the shallowest nodes in the search tree first.
"""
"*** YOUR CODE HERE ***"
# BFS is identical to DFS, save for the data structure used to store the frontier
# Frontier stored in a Queue
frontier = util.Queue()
# Visited states stored in a list
visitedStates = []
# Format of each element: (current coordinates, [path taken to get there])
frontier.push((problem.getStartState(), []))
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited. if not, add their coordinates to the frontier, and append their respective direction to the path list
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
frontier.push((coordinates, pathTaken + [direction]))
util.raiseNotDefined()
def uniformCostSearch(problem):
"Search the node of least total cost first. "
"*** YOUR CODE HERE ***"
    # UCS is similar to DFS and BFS, save for a few key differences
# Frontier stored in a Priority Queue
frontier = util.PriorityQueue()
# Visited states stored in a list
visitedStates = []
# Format of each element: ((current coordinates, [path taken to get there]), cost)
frontier.push((problem.getStartState(), []), 0)
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited.
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
# if not, re-calculate the cost to reach the given coordinates, and push the updated information to the frontier
newCost = problem.getCostOfActions(pathTaken + [direction])
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
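
# Hedged illustration (not part of the assignment scaffold): a non-trivial
# heuristic with the same signature as nullHeuristic, suitable for passing as
# aStarSearch(problem, manhattanHeuristicExample). It assumes the state is an
# (x, y) tuple and that the problem exposes a 'goal' attribute, as the
# position-search problems in searchAgents.py typically do; both are
# assumptions for the sake of the example.
def manhattanHeuristicExample(state, problem=None):
    goal = getattr(problem, 'goal', (1, 1))
    return abs(state[0] - goal[0]) + abs(state[1] - goal[1])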
def aStarSearch(problem, heuristic=nullHeuristic):
"Search the node that has the lowest combined cost and heuristic first."
"*** YOUR CODE HERE ***"
    # A* differs from UCS in that the heuristic estimate is added to the path cost when ordering the frontier
# Frontier stored in a Priority Queue
frontier = util.PriorityQueue()
# Visited states stored in a list
visitedStates = []
# Format of each element: ((current coordinates, [path taken to get there]), heuristic function)
frontier.push((problem.getStartState(), []), heuristic(problem.getStartState(), problem))
# while there are still states to explore
while not frontier.isEmpty():
# store the current state and path in separate variables
currentState, pathTaken = frontier.pop()
# for skipping states that have already been visited
if currentState in visitedStates:
continue
# for returning the correct path to the goal state upon discovering it
if problem.isGoalState(currentState):
return pathTaken
# count the current state as "visited"
visitedStates.append(currentState)
# for each successor state, check whether they have already been visited.
for coordinates, direction, cost in problem.getSuccessors(currentState):
if coordinates not in visitedStates:
# if not, re-calculate the cost to reach the given coordinates, and push the updated information to the frontier. Here, unlike UCS, the heuristic function is added to the newCost variable
newCost = problem.getCostOfActions(pathTaken + [direction]) + heuristic(coordinates, problem)
frontier.push((coordinates, pathTaken + [direction]), newCost)
util.raiseNotDefined()
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
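
# Hedged self-check (not part of the assignment scaffold): a tiny hand-built
# graph problem used to exercise the searches above. The node names, actions
# and step costs are invented for illustration; running this file directly
# only assumes the assignment's util.py is importable, as the import at the
# top already requires.
class _ToyGraphProblem(SearchProblem):
    """A 4-node graph: A->B (1), A->C (4), B->C (1), C->G (1); goal is G."""
    _edges = {
        'A': [('B', 'A->B', 1), ('C', 'A->C', 4)],
        'B': [('C', 'B->C', 1)],
        'C': [('G', 'C->G', 1)],
        'G': [],
    }

    def getStartState(self):
        return 'A'

    def isGoalState(self, state):
        return state == 'G'

    def getSuccessors(self, state):
        return self._edges[state]

    def getCostOfActions(self, actions):
        # replay the actions from the start state and sum the step costs
        state, total = 'A', 0
        for action in actions:
            for successor, candidate, stepCost in self._edges[state]:
                if candidate == action:
                    state, total = successor, total + stepCost
                    break
        return total


if __name__ == '__main__':
    toy = _ToyGraphProblem()
    print("dfs  :", depthFirstSearch(toy))
    print("bfs  :", breadthFirstSearch(toy))
    print("ucs  :", uniformCostSearch(toy))    # cheapest path A->B->C->G (cost 3)
    print("astar:", aStarSearch(toy))          # with nullHeuristic this matches ucs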
"step-ids": [
8,
10,
12,
13,
15
]
}
step_ids: [8, 10, 12, 13, 15]
<|reserved_special_token_0|>
class Stop(object):
"""docstring for Stop"""
def __init__(self, arg):
self.fields = ['stop_id', 'stop_name', 'stop_lat', 'stop_lon',
'stop_calle', 'stop_numero', 'stop_entre', 'stop_esquina']
self.d = {}
self.parse(arg)
def __repr__(self):
return str(self.d)
def parse(self, dictParams):
for k, v in dictParams.items():
if str(k) in 'stop_id':
v = int(v)
if type(v) is str:
v = codecs.decode(v, 'utf-8')
if k in self.fields:
self.d.update({k: v})
def save(self, db):
db.insert('stops', **self.d)
def saveStops(stops):
db = database.dbInterface('../database/cba-1.0.1.sqlite')
for stop_id, stop in stops.items():
stop.save(db)
db.close()
def addFromFile(stops, filename):
repeated = {}
with open('../incoming/' + filename) as csvFile:
reader = csv.DictReader(csvFile)
for r in reader:
stop_id = r['stop_id']
stop = Stop(r)
if stop_id in stops:
if stop.d != stops[stop_id].d:
pass
repeated[stop_id] = stop
print('stop already in collection, skipping')
print(r)
print(stops[stop_id])
else:
stops[stop_id] = stop
return repeated
<|reserved_special_token_0|>
def main():
stops = {}
repeated = addFromFile(stops, 'asf/stops.csv')
repeated.update(addFromFile(stops, 'ccba/stops.csv'))
repeated.update(addFromFile(stops, 'coniferal/stops.csv'))
repeated.update(addFromFile(stops, 'ersa/stops.csv'))
show(repeated)
saveStops(stops)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Stop(object):
"""docstring for Stop"""
def __init__(self, arg):
self.fields = ['stop_id', 'stop_name', 'stop_lat', 'stop_lon',
'stop_calle', 'stop_numero', 'stop_entre', 'stop_esquina']
self.d = {}
self.parse(arg)
def __repr__(self):
return str(self.d)
def parse(self, dictParams):
for k, v in dictParams.items():
if str(k) in 'stop_id':
v = int(v)
if type(v) is str:
v = codecs.decode(v, 'utf-8')
if k in self.fields:
self.d.update({k: v})
def save(self, db):
db.insert('stops', **self.d)
def saveStops(stops):
db = database.dbInterface('../database/cba-1.0.1.sqlite')
for stop_id, stop in stops.items():
stop.save(db)
db.close()
def addFromFile(stops, filename):
repeated = {}
with open('../incoming/' + filename) as csvFile:
reader = csv.DictReader(csvFile)
for r in reader:
stop_id = r['stop_id']
stop = Stop(r)
if stop_id in stops:
if stop.d != stops[stop_id].d:
pass
repeated[stop_id] = stop
print('stop already in collection, skipping')
print(r)
print(stops[stop_id])
else:
stops[stop_id] = stop
return repeated
def show(stops):
for stop_id, stop in stops.items():
print(stop_id, stop)
def main():
stops = {}
repeated = addFromFile(stops, 'asf/stops.csv')
repeated.update(addFromFile(stops, 'ccba/stops.csv'))
repeated.update(addFromFile(stops, 'coniferal/stops.csv'))
repeated.update(addFromFile(stops, 'ersa/stops.csv'))
show(repeated)
saveStops(stops)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
lgr = logging.getLogger(__name__)
lgr.log(logging.INFO, 'hello')
<|reserved_special_token_0|>
class Stop(object):
"""docstring for Stop"""
def __init__(self, arg):
self.fields = ['stop_id', 'stop_name', 'stop_lat', 'stop_lon',
'stop_calle', 'stop_numero', 'stop_entre', 'stop_esquina']
self.d = {}
self.parse(arg)
def __repr__(self):
return str(self.d)
def parse(self, dictParams):
for k, v in dictParams.items():
if str(k) in 'stop_id':
v = int(v)
if type(v) is str:
v = codecs.decode(v, 'utf-8')
if k in self.fields:
self.d.update({k: v})
def save(self, db):
db.insert('stops', **self.d)
def saveStops(stops):
db = database.dbInterface('../database/cba-1.0.1.sqlite')
for stop_id, stop in stops.items():
stop.save(db)
db.close()
def addFromFile(stops, filename):
repeated = {}
with open('../incoming/' + filename) as csvFile:
reader = csv.DictReader(csvFile)
for r in reader:
stop_id = r['stop_id']
stop = Stop(r)
if stop_id in stops:
if stop.d != stops[stop_id].d:
pass
repeated[stop_id] = stop
print('stop already in collection, skipping')
print(r)
print(stops[stop_id])
else:
stops[stop_id] = stop
return repeated
def show(stops):
for stop_id, stop in stops.items():
print(stop_id, stop)
def main():
stops = {}
repeated = addFromFile(stops, 'asf/stops.csv')
repeated.update(addFromFile(stops, 'ccba/stops.csv'))
repeated.update(addFromFile(stops, 'coniferal/stops.csv'))
repeated.update(addFromFile(stops, 'ersa/stops.csv'))
show(repeated)
saveStops(stops)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import logging
lgr = logging.getLogger(__name__)
lgr.log(logging.INFO, 'hello')
import database
import csv
import codecs
class Stop(object):
"""docstring for Stop"""
def __init__(self, arg):
self.fields = ['stop_id', 'stop_name', 'stop_lat', 'stop_lon',
'stop_calle', 'stop_numero', 'stop_entre', 'stop_esquina']
self.d = {}
self.parse(arg)
def __repr__(self):
return str(self.d)
def parse(self, dictParams):
for k, v in dictParams.items():
if str(k) in 'stop_id':
v = int(v)
if type(v) is str:
v = codecs.decode(v, 'utf-8')
if k in self.fields:
self.d.update({k: v})
def save(self, db):
db.insert('stops', **self.d)
def saveStops(stops):
db = database.dbInterface('../database/cba-1.0.1.sqlite')
for stop_id, stop in stops.items():
stop.save(db)
db.close()
def addFromFile(stops, filename):
repeated = {}
with open('../incoming/' + filename) as csvFile:
reader = csv.DictReader(csvFile)
for r in reader:
stop_id = r['stop_id']
stop = Stop(r)
if stop_id in stops:
if stop.d != stops[stop_id].d:
pass
repeated[stop_id] = stop
print('stop already in collection, skipping')
print(r)
print(stops[stop_id])
else:
stops[stop_id] = stop
return repeated
def show(stops):
for stop_id, stop in stops.items():
print(stop_id, stop)
def main():
stops = {}
repeated = addFromFile(stops, 'asf/stops.csv')
repeated.update(addFromFile(stops, 'ccba/stops.csv'))
repeated.update(addFromFile(stops, 'coniferal/stops.csv'))
repeated.update(addFromFile(stops, 'ersa/stops.csv'))
show(repeated)
saveStops(stops)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
lgr = logging.getLogger(__name__)
lgr.log(logging.INFO, "hello")
import database
import csv
import codecs
class Stop(object):
"""docstring for Stop"""
def __init__(self, arg):
self.fields = [
'stop_id',
'stop_name',
'stop_lat',
'stop_lon',
'stop_calle',
'stop_numero',
'stop_entre',
'stop_esquina'
]
self.d = {}
self.parse(arg)
def __repr__(self):
return str(self.d)
def parse(self, dictParams):
for k,v in dictParams.items():
if str(k) in 'stop_id':
v = int(v)
if type(v) is str:
v = codecs.decode(v, 'utf-8')
if k in self.fields:
self.d.update({k:v})
def save(self, db):
db.insert('stops', **self.d)
def saveStops(stops):
db = database.dbInterface('../database/cba-1.0.1.sqlite')
for stop_id, stop in stops.items():
stop.save(db)
db.close()
def addFromFile(stops, filename):
repeated = {}
with open('../incoming/'+ filename) as csvFile:
reader = csv.DictReader(csvFile)
for r in reader:
stop_id = r['stop_id']
stop = Stop(r)
if stop_id in stops:
if stop.d != stops[stop_id].d:
pass
repeated[stop_id] = stop
print("stop already in collection, skipping")
print(r)
print(stops[stop_id])
else:
stops[stop_id] = stop
return repeated
def show(stops):
for stop_id, stop in stops.items():
print(stop_id, stop)
def main():
stops = {}
repeated = addFromFile(stops, 'asf/stops.csv')
repeated.update(addFromFile(stops, 'ccba/stops.csv'))
repeated.update(addFromFile(stops, 'coniferal/stops.csv'))
repeated.update(addFromFile(stops, 'ersa/stops.csv'))
# show(stops)
show(repeated)
saveStops(stops)
if __name__ == '__main__':
main()
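The loader above relies on a local `database` module that is not part of this row. Below is a minimal sketch of the interface it appears to expect (a `dbInterface` class with `insert(table, **fields)` and `close()`); the class name and signatures are assumptions taken from the call sites, not the real module.

# Hypothetical stand-in for the `database` module used by the stops loader above.
# Signatures are inferred from database.dbInterface(path), db.insert('stops', **d), db.close().
import sqlite3

class dbInterface(object):
    def __init__(self, path):
        self.conn = sqlite3.connect(path)

    def insert(self, table, **fields):
        # Build the column list and placeholders from the keyword arguments.
        cols = ', '.join(fields)
        marks = ', '.join('?' for _ in fields)
        sql = 'INSERT INTO %s (%s) VALUES (%s)' % (table, cols, marks)
        self.conn.execute(sql, tuple(fields.values()))
        self.conn.commit()

    def close(self):
        self.conn.close()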
order_type: flexible
{
"blob_id": "39ecbf914b0b2b25ce4290eac4198199b90f95e0",
"index": 5384,
"step-1": "<mask token>\n\n\nclass Stop(object):\n \"\"\"docstring for Stop\"\"\"\n\n def __init__(self, arg):\n self.fields = ['stop_id', 'stop_name', 'stop_lat', 'stop_lon',\n 'stop_calle', 'stop_numero', 'stop_entre', 'stop_esquina']\n self.d = {}\n self.parse(arg)\n\n def __repr__(self):\n return str(self.d)\n\n def parse(self, dictParams):\n for k, v in dictParams.items():\n if str(k) in 'stop_id':\n v = int(v)\n if type(v) is str:\n v = codecs.decode(v, 'utf-8')\n if k in self.fields:\n self.d.update({k: v})\n\n def save(self, db):\n db.insert('stops', **self.d)\n\n\ndef saveStops(stops):\n db = database.dbInterface('../database/cba-1.0.1.sqlite')\n for stop_id, stop in stops.items():\n stop.save(db)\n db.close()\n\n\ndef addFromFile(stops, filename):\n repeated = {}\n with open('../incoming/' + filename) as csvFile:\n reader = csv.DictReader(csvFile)\n for r in reader:\n stop_id = r['stop_id']\n stop = Stop(r)\n if stop_id in stops:\n if stop.d != stops[stop_id].d:\n pass\n repeated[stop_id] = stop\n print('stop already in collection, skipping')\n print(r)\n print(stops[stop_id])\n else:\n stops[stop_id] = stop\n return repeated\n\n\n<mask token>\n\n\ndef main():\n stops = {}\n repeated = addFromFile(stops, 'asf/stops.csv')\n repeated.update(addFromFile(stops, 'ccba/stops.csv'))\n repeated.update(addFromFile(stops, 'coniferal/stops.csv'))\n repeated.update(addFromFile(stops, 'ersa/stops.csv'))\n show(repeated)\n saveStops(stops)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Stop(object):\n \"\"\"docstring for Stop\"\"\"\n\n def __init__(self, arg):\n self.fields = ['stop_id', 'stop_name', 'stop_lat', 'stop_lon',\n 'stop_calle', 'stop_numero', 'stop_entre', 'stop_esquina']\n self.d = {}\n self.parse(arg)\n\n def __repr__(self):\n return str(self.d)\n\n def parse(self, dictParams):\n for k, v in dictParams.items():\n if str(k) in 'stop_id':\n v = int(v)\n if type(v) is str:\n v = codecs.decode(v, 'utf-8')\n if k in self.fields:\n self.d.update({k: v})\n\n def save(self, db):\n db.insert('stops', **self.d)\n\n\ndef saveStops(stops):\n db = database.dbInterface('../database/cba-1.0.1.sqlite')\n for stop_id, stop in stops.items():\n stop.save(db)\n db.close()\n\n\ndef addFromFile(stops, filename):\n repeated = {}\n with open('../incoming/' + filename) as csvFile:\n reader = csv.DictReader(csvFile)\n for r in reader:\n stop_id = r['stop_id']\n stop = Stop(r)\n if stop_id in stops:\n if stop.d != stops[stop_id].d:\n pass\n repeated[stop_id] = stop\n print('stop already in collection, skipping')\n print(r)\n print(stops[stop_id])\n else:\n stops[stop_id] = stop\n return repeated\n\n\ndef show(stops):\n for stop_id, stop in stops.items():\n print(stop_id, stop)\n\n\ndef main():\n stops = {}\n repeated = addFromFile(stops, 'asf/stops.csv')\n repeated.update(addFromFile(stops, 'ccba/stops.csv'))\n repeated.update(addFromFile(stops, 'coniferal/stops.csv'))\n repeated.update(addFromFile(stops, 'ersa/stops.csv'))\n show(repeated)\n saveStops(stops)\n\n\n<mask token>\n",
"step-3": "<mask token>\nlgr = logging.getLogger(__name__)\nlgr.log('hello')\n<mask token>\n\n\nclass Stop(object):\n \"\"\"docstring for Stop\"\"\"\n\n def __init__(self, arg):\n self.fields = ['stop_id', 'stop_name', 'stop_lat', 'stop_lon',\n 'stop_calle', 'stop_numero', 'stop_entre', 'stop_esquina']\n self.d = {}\n self.parse(arg)\n\n def __repr__(self):\n return str(self.d)\n\n def parse(self, dictParams):\n for k, v in dictParams.items():\n if str(k) in 'stop_id':\n v = int(v)\n if type(v) is str:\n v = codecs.decode(v, 'utf-8')\n if k in self.fields:\n self.d.update({k: v})\n\n def save(self, db):\n db.insert('stops', **self.d)\n\n\ndef saveStops(stops):\n db = database.dbInterface('../database/cba-1.0.1.sqlite')\n for stop_id, stop in stops.items():\n stop.save(db)\n db.close()\n\n\ndef addFromFile(stops, filename):\n repeated = {}\n with open('../incoming/' + filename) as csvFile:\n reader = csv.DictReader(csvFile)\n for r in reader:\n stop_id = r['stop_id']\n stop = Stop(r)\n if stop_id in stops:\n if stop.d != stops[stop_id].d:\n pass\n repeated[stop_id] = stop\n print('stop already in collection, skipping')\n print(r)\n print(stops[stop_id])\n else:\n stops[stop_id] = stop\n return repeated\n\n\ndef show(stops):\n for stop_id, stop in stops.items():\n print(stop_id, stop)\n\n\ndef main():\n stops = {}\n repeated = addFromFile(stops, 'asf/stops.csv')\n repeated.update(addFromFile(stops, 'ccba/stops.csv'))\n repeated.update(addFromFile(stops, 'coniferal/stops.csv'))\n repeated.update(addFromFile(stops, 'ersa/stops.csv'))\n show(repeated)\n saveStops(stops)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import logging\nlgr = logging.getLogger(__name__)\nlgr.log('hello')\nimport database\nimport csv\nimport codecs\n\n\nclass Stop(object):\n \"\"\"docstring for Stop\"\"\"\n\n def __init__(self, arg):\n self.fields = ['stop_id', 'stop_name', 'stop_lat', 'stop_lon',\n 'stop_calle', 'stop_numero', 'stop_entre', 'stop_esquina']\n self.d = {}\n self.parse(arg)\n\n def __repr__(self):\n return str(self.d)\n\n def parse(self, dictParams):\n for k, v in dictParams.items():\n if str(k) in 'stop_id':\n v = int(v)\n if type(v) is str:\n v = codecs.decode(v, 'utf-8')\n if k in self.fields:\n self.d.update({k: v})\n\n def save(self, db):\n db.insert('stops', **self.d)\n\n\ndef saveStops(stops):\n db = database.dbInterface('../database/cba-1.0.1.sqlite')\n for stop_id, stop in stops.items():\n stop.save(db)\n db.close()\n\n\ndef addFromFile(stops, filename):\n repeated = {}\n with open('../incoming/' + filename) as csvFile:\n reader = csv.DictReader(csvFile)\n for r in reader:\n stop_id = r['stop_id']\n stop = Stop(r)\n if stop_id in stops:\n if stop.d != stops[stop_id].d:\n pass\n repeated[stop_id] = stop\n print('stop already in collection, skipping')\n print(r)\n print(stops[stop_id])\n else:\n stops[stop_id] = stop\n return repeated\n\n\ndef show(stops):\n for stop_id, stop in stops.items():\n print(stop_id, stop)\n\n\ndef main():\n stops = {}\n repeated = addFromFile(stops, 'asf/stops.csv')\n repeated.update(addFromFile(stops, 'ccba/stops.csv'))\n repeated.update(addFromFile(stops, 'coniferal/stops.csv'))\n repeated.update(addFromFile(stops, 'ersa/stops.csv'))\n show(repeated)\n saveStops(stops)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport logging\nlgr = logging.getLogger(__name__)\nlgr.log(\"hello\")\nimport database\nimport csv\nimport codecs\nclass Stop(object):\n \"\"\"docstring for Stop\"\"\"\n def __init__(self, arg):\n self.fields = [\n 'stop_id',\n 'stop_name',\n 'stop_lat',\n 'stop_lon',\n 'stop_calle',\n 'stop_numero',\n 'stop_entre',\n 'stop_esquina'\n ]\n self.d = {}\n self.parse(arg)\n\n def __repr__(self):\n return str(self.d)\n\n def parse(self, dictParams):\n for k,v in dictParams.items():\n if str(k) in 'stop_id':\n v = int(v)\n if type(v) is str:\n v = codecs.decode(v, 'utf-8')\n if k in self.fields:\n self.d.update({k:v})\n def save(self, db):\n db.insert('stops', **self.d)\n\ndef saveStops(stops):\n db = database.dbInterface('../database/cba-1.0.1.sqlite')\n for stop_id, stop in stops.items():\n stop.save(db)\n db.close()\n\ndef addFromFile(stops, filename):\n repeated = {}\n with open('../incoming/'+ filename) as csvFile:\n reader = csv.DictReader(csvFile)\n for r in reader:\n stop_id = r['stop_id']\n stop = Stop(r)\n if stop_id in stops:\n if stop.d != stops[stop_id].d:\n pass\n repeated[stop_id] = stop\n print(\"stop already in collection, skipping\")\n print(r)\n print(stops[stop_id])\n else:\n stops[stop_id] = stop\n return repeated\n\ndef show(stops):\n for stop_id, stop in stops.items():\n print(stop_id, stop)\n \ndef main():\n stops = {}\n repeated = addFromFile(stops, 'asf/stops.csv')\n repeated.update(addFromFile(stops, 'ccba/stops.csv'))\n repeated.update(addFromFile(stops, 'coniferal/stops.csv'))\n repeated.update(addFromFile(stops, 'ersa/stops.csv'))\n\n \n # show(stops)\n show(repeated)\n\n saveStops(stops)\n\n\nif __name__ == '__main__':\n main()",
"step-ids": [
9,
10,
12,
13,
14
]
}
step_ids: [9, 10, 12, 13, 14]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
for i in range(1, 11):
encabezado = 'Tabla del {}'
print(encabezado.format(i))
print()
for j in range(1, 11):
salida = '{} x {} = {}'
print(salida.format(i, j, i * j))
else:
print()
<|reserved_special_token_1|>
# Author: Kevin Oswaldo Palacios Jimenez
# Created: 16/09/19
# A for loop drives the outer iteration
# print() with no argument produces nothing
# beyond moving on to the next line
for i in range (1,11):
    encabezado="Tabla del {}"
    print(encabezado.format(i))
    print()
    # A second for nested inside the first adds one more loop
    for j in range(1,11):
        # i holds the base of the table
        # j holds the current multiplier
        salida="{} x {} = {}"
        print(salida.format(i,j,i*j))
    else:
        # once the loop has finished iterating,
        # a blank line is printed before the next table
        print()
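For reference, the output the nested loops produce looks like this (a header, a blank line, the ten products, then a blank line before the next table):

Tabla del 1

1 x 1 = 1
1 x 2 = 2
...
1 x 10 = 10

Tabla del 2

2 x 1 = 2
...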
order_type: flexible
{
"blob_id": "86f365612e9f15e7658160ecab1d3d9970ca364e",
"index": 9699,
"step-1": "<mask token>\n",
"step-2": "for i in range(1, 11):\n encabezado = 'Tabla del {}'\n print(encabezado.format(i))\n print()\n for j in range(1, 11):\n salida = '{} x {} = {}'\n print(salida.format(i, j, i * j))\n else:\n print()\n",
"step-3": "# Autor : Kevin Oswaldo Palacios Jimenez\r\n# Fecha de creacion: 16/09/19 \r\n\r\n# Se genera un bucle con for \r\n# al no tener argumento print no genera ningun cambio \r\n# mas que continuar a la siguiente linea\r\nfor i in range (1,11): \r\n encabezado=\"Tabla del {}\" \r\n print(encabezado.format(i))\r\n\r\n print() \r\n # Usaremos un for dentro de otro generando un bucle mas\r\n for j in range(1,11): \r\n # en donde i tendremos la base \r\n # con j tendriamos el elemento\r\n salida=\"{} x {} = {}\" \r\n print(salida.format(i,j,i*j)) \r\n else: \r\n # con el bucle teniendo su proceso iterativo \r\n # se saltaran las linea pero ejecutando el codigo \r\n print() ",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
step_ids: [0, 1, 2]
import configparser
import sqlite3
import time
import uuid
from duoquest.tsq import TableSketchQuery
def input_db_name(conn):
while True:
db_name = input('Database name (default: concert_singer) > ')
if not db_name:
db_name = 'concert_singer'
cur = conn.cursor()
cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))
if cur.fetchone():
break
else:
print(f'<{db_name}> is not a valid database.')
return db_name
def input_nlq():
nlq = input('NLQ (default: How many singers are there?)> ')
if not nlq:
nlq = 'How many singers are there?'
return nlq
def input_num_cols():
while True:
num_cols = input('Number of columns > ')
try:
num_cols = int(num_cols)
break
except Exception as e:
print('Number of columns should be integer!')
return num_cols
def input_order():
ordered = False
while True:
order_input = input('Should results be ordered? (y/n) > ')
if order_input == 'y':
ordered = True
break
elif order_input == 'n':
break
else:
print('y/n only!')
return ordered
def input_limit():
limit = None
while True:
limit_input = input('Limit results to n tuples? (int or blank) > ')
if not limit_input:
break
try:
limit = int(limit_input)
break
except Exception as e:
print('int or blank only!')
return limit
def input_tsq_types(num_cols):
while True:
types_input = input('Types (`text` or `number`, comma separated)> ')
types = list(map(lambda x: x.strip(), types_input.split(',')))
if any(map(lambda x: x not in ('text', 'number'), types)):
print('Types must be `text` or `number`')
continue
if len(types) != num_cols:
print('Number of types must match number of columns.')
continue
break
return types
def input_tsq_row_count():
tsq_row_count = 0
while True:
tsq_row_count_input = input('Number of TSQ rows (int) > ')
try:
tsq_row_count = int(tsq_row_count_input)
break
except Exception as e:
print('int only!')
return tsq_row_count
def input_tsq_row(row_num, tsq_types):
while True:
row_input = input(f'Row {row_num} (semicolon-separated values) > ')
tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))
validated = True
for i, cell in enumerate(tsq_row):
if tsq_types[i] == 'number':
try:
float(cell)
except Exception as e:
print('At least one cell value is invalid.')
validated = False
break
if validated:
break
return tsq_row
def main():
config = configparser.ConfigParser()
config.read('config.ini')
db_path = config['db']['path']
conn = sqlite3.connect(db_path)
db_name = input_db_name(conn)
nlq = input_nlq()
num_cols = input_num_cols()
tsq = TableSketchQuery(num_cols)
tsq.types = input_tsq_types(num_cols)
tsq_row_count = input_tsq_row_count()
for i in range(tsq_row_count):
tsq.values.append(input_tsq_row(i+1, tsq.types))
tsq.order = input_order()
tsq.limit = input_limit()
print(tsq.to_proto())
cur = conn.cursor()
cur.execute('''INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)
VALUES (?, ?, ?, ?, ?, ?)''',
(str(uuid.uuid4()), db_name, nlq,
tsq.to_proto().SerializeToString(), 'waiting',
int(time.time())))
conn.commit()
conn.close()
if __name__ == '__main__':
main()
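The script assumes the SQLite file at config['db']['path'] already contains `databases` and `tasks` tables. Their real schema is not shown in this row; the bootstrap below is only a sketch of a minimal layout compatible with the SELECT and INSERT statements above (column types and the seed row are assumptions).

# Hypothetical bootstrap for the SQLite file the script reads and writes.
# Only the columns actually referenced above are declared.
import sqlite3

def init_db(path='duoquest.db'):
    conn = sqlite3.connect(path)
    conn.execute('CREATE TABLE IF NOT EXISTS databases (name TEXT PRIMARY KEY)')
    conn.execute('CREATE TABLE IF NOT EXISTS tasks '
                 '(tid TEXT, db TEXT, nlq TEXT, tsq_proto BLOB, status TEXT, time INTEGER)')
    # Seed one database name so the default prompt value is accepted.
    conn.execute("INSERT OR IGNORE INTO databases (name) VALUES ('concert_singer')")
    conn.commit()
    conn.close()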
order_type: normal
{
"blob_id": "54ec1961f4835f575e7129bd0b2fcdeb97be2f03",
"index": 93,
"step-1": "<mask token>\n\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = conn.cursor()\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\n<mask token>\n\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? (int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\ndef input_tsq_row_count():\n tsq_row_count = 0\n while True:\n tsq_row_count_input = input('Number of TSQ rows (int) > ')\n try:\n tsq_row_count = int(tsq_row_count_input)\n break\n except Exception as e:\n print('int only!')\n return tsq_row_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = conn.cursor()\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\n<mask token>\n\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? (int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\ndef input_tsq_row_count():\n tsq_row_count = 0\n while True:\n tsq_row_count_input = input('Number of TSQ rows (int) > ')\n try:\n tsq_row_count = int(tsq_row_count_input)\n break\n except Exception as e:\n print('int only!')\n return tsq_row_count\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = conn.cursor()\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? (y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? (int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\ndef input_tsq_row_count():\n tsq_row_count = 0\n while True:\n tsq_row_count_input = input('Number of TSQ rows (int) > ')\n try:\n tsq_row_count = int(tsq_row_count_input)\n break\n except Exception as e:\n print('int only!')\n return tsq_row_count\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\n<mask token>\n",
"step-4": "import configparser\nimport sqlite3\nimport time\nimport uuid\nfrom duoquest.tsq import TableSketchQuery\n\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = conn.cursor()\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? (y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? (int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n return types\n\n\ndef input_tsq_row_count():\n tsq_row_count = 0\n while True:\n tsq_row_count_input = input('Number of TSQ rows (int) > ')\n try:\n tsq_row_count = int(tsq_row_count_input)\n break\n except Exception as e:\n print('int only!')\n return tsq_row_count\n\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n return tsq_row\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_path = config['db']['path']\n conn = sqlite3.connect(db_path)\n db_name = input_db_name(conn)\n nlq = input_nlq()\n num_cols = input_num_cols()\n tsq = TableSketchQuery(num_cols)\n tsq.types = input_tsq_types(num_cols)\n tsq_row_count = input_tsq_row_count()\n for i in range(tsq_row_count):\n tsq.values.append(input_tsq_row(i + 1, tsq.types))\n tsq.order = input_order()\n tsq.limit = input_limit()\n print(tsq.to_proto())\n cur = conn.cursor()\n cur.execute(\n \"\"\"INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)\n VALUES (?, ?, ?, ?, ?, ?)\"\"\"\n , (str(uuid.uuid4()), db_name, nlq, tsq.to_proto().\n SerializeToString(), 'waiting', int(time.time())))\n conn.commit()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import configparser\nimport sqlite3\nimport time\nimport uuid\n\nfrom duoquest.tsq import TableSketchQuery\n\ndef input_db_name(conn):\n while True:\n db_name = input('Database name (default: concert_singer) > ')\n if not db_name:\n db_name = 'concert_singer'\n cur = conn.cursor()\n\n cur.execute('SELECT 1 FROM databases WHERE name = ?', (db_name,))\n if cur.fetchone():\n break\n else:\n print(f'<{db_name}> is not a valid database.')\n return db_name\n\ndef input_nlq():\n nlq = input('NLQ (default: How many singers are there?)> ')\n if not nlq:\n nlq = 'How many singers are there?'\n return nlq\n\ndef input_num_cols():\n while True:\n num_cols = input('Number of columns > ')\n try:\n num_cols = int(num_cols)\n break\n except Exception as e:\n print('Number of columns should be integer!')\n return num_cols\n\ndef input_order():\n ordered = False\n while True:\n order_input = input('Should results be ordered? (y/n) > ')\n if order_input == 'y':\n ordered = True\n break\n elif order_input == 'n':\n break\n else:\n print('y/n only!')\n return ordered\n\ndef input_limit():\n limit = None\n while True:\n limit_input = input('Limit results to n tuples? (int or blank) > ')\n if not limit_input:\n break\n try:\n limit = int(limit_input)\n break\n except Exception as e:\n print('int or blank only!')\n return limit\n\ndef input_tsq_types(num_cols):\n while True:\n types_input = input('Types (`text` or `number`, comma separated)> ')\n types = list(map(lambda x: x.strip(), types_input.split(',')))\n\n if any(map(lambda x: x not in ('text', 'number'), types)):\n print('Types must be `text` or `number`')\n continue\n\n if len(types) != num_cols:\n print('Number of types must match number of columns.')\n continue\n break\n\n return types\n\ndef input_tsq_row_count():\n tsq_row_count = 0\n while True:\n tsq_row_count_input = input('Number of TSQ rows (int) > ')\n try:\n tsq_row_count = int(tsq_row_count_input)\n break\n except Exception as e:\n print('int only!')\n return tsq_row_count\n\ndef input_tsq_row(row_num, tsq_types):\n while True:\n row_input = input(f'Row {row_num} (semicolon-separated values) > ')\n tsq_row = list(map(lambda x: x.strip(), row_input.split(';')))\n\n validated = True\n for i, cell in enumerate(tsq_row):\n if tsq_types[i] == 'number':\n try:\n float(cell)\n except Exception as e:\n print('At least one cell value is invalid.')\n validated = False\n break\n if validated:\n break\n\n return tsq_row\n\ndef main():\n config = configparser.ConfigParser()\n config.read('config.ini')\n db_path = config['db']['path']\n\n conn = sqlite3.connect(db_path)\n\n db_name = input_db_name(conn)\n nlq = input_nlq()\n num_cols = input_num_cols()\n\n tsq = TableSketchQuery(num_cols)\n\n tsq.types = input_tsq_types(num_cols)\n\n tsq_row_count = input_tsq_row_count()\n for i in range(tsq_row_count):\n tsq.values.append(input_tsq_row(i+1, tsq.types))\n\n tsq.order = input_order()\n tsq.limit = input_limit()\n\n print(tsq.to_proto())\n\n cur = conn.cursor()\n cur.execute('''INSERT INTO tasks (tid, db, nlq, tsq_proto, status, time)\n VALUES (?, ?, ?, ?, ?, ?)''',\n (str(uuid.uuid4()), db_name, nlq,\n tsq.to_proto().SerializeToString(), 'waiting',\n int(time.time())))\n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
6,
7,
8,
11,
12
]
}
step_ids: [6, 7, 8, 11, 12]
<|reserved_special_token_0|>
def openfile(name):
f = open(name, 'r', encoding='utf-8')
text = f.readlines()
f.close()
return text
def makedict(text):
A = []
for line in text:
if 'lex:' in line:
a = []
a.append(line[6:].replace('\n', ''))
elif 'gramm:' in line:
a.append(line[8:].replace('\n', ''))
elif 'trans_ru:' in line:
a.append(line[11:].replace('\n', ''))
A.append(a)
return A
<|reserved_special_token_0|>
def dictionary():
A = []
for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'
]:
A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))
transl = []
for el in A:
a = []
a.append(convert_input(el[0], 'cyr'))
a += el
transl.append(a)
return transl
def dict_split(transl):
D = {k: [] for k in ['N', 'IMIT', 'V']}
row = '%s\t%s\t%s\t%s\n'
    for line in transl:
parts = []
if line[2] == 'N' or 'ADJ' in line[2]:
parts.append(line[2])
elif 'N-persn' in line[2] or 'N,' in line[2]:
parts.append('N')
elif 'V,' in line[2]:
parts.append('V')
if 'ADV' in line[2]:
parts.append('ADV')
if 'POST' in line[2]:
parts.append('POST')
if 'PRO' in line[2]:
parts.append('PRO')
if 'NUM' in line[2]:
parts.append('NUM')
if 'INTRJ' in line[2]:
parts.append('INTRJ')
if 'CNJ' in line[2]:
parts.append('CNJ')
if 'IMIT' in line[2]:
parts.append('IMIT')
if 'PART' in line[2]:
parts.append('PART')
if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in
parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or
'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):
D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'V' in parts or 'PRAED' in parts:
D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'IMIT' in parts:
D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),
line[3]))
return D
def main():
D = dict_split(dictionary())
for k in D:
D[k] = set(D[k])
fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')
fw.write(''.join(D[k]))
fw.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def openfile(name):
f = open(name, 'r', encoding='utf-8')
text = f.readlines()
f.close()
return text
def makedict(text):
A = []
for line in text:
if 'lex:' in line:
a = []
a.append(line[6:].replace('\n', ''))
elif 'gramm:' in line:
a.append(line[8:].replace('\n', ''))
elif 'trans_ru:' in line:
a.append(line[11:].replace('\n', ''))
A.append(a)
return A
def writefile(name, text):
fw = open(name, 'w', encoding='utf-8')
fw.write(text)
fw.close()
def dictionary():
A = []
for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'
]:
A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))
transl = []
for el in A:
a = []
a.append(convert_input(el[0], 'cyr'))
a += el
transl.append(a)
return transl
def dict_split(transl):
D = {k: [] for k in ['N', 'IMIT', 'V']}
row = '%s\t%s\t%s\t%s\n'
    for line in transl:
parts = []
if line[2] == 'N' or 'ADJ' in line[2]:
parts.append(line[2])
elif 'N-persn' in line[2] or 'N,' in line[2]:
parts.append('N')
elif 'V,' in line[2]:
parts.append('V')
if 'ADV' in line[2]:
parts.append('ADV')
if 'POST' in line[2]:
parts.append('POST')
if 'PRO' in line[2]:
parts.append('PRO')
if 'NUM' in line[2]:
parts.append('NUM')
if 'INTRJ' in line[2]:
parts.append('INTRJ')
if 'CNJ' in line[2]:
parts.append('CNJ')
if 'IMIT' in line[2]:
parts.append('IMIT')
if 'PART' in line[2]:
parts.append('PART')
if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in
parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or
'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):
D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'V' in parts or 'PRAED' in parts:
D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'IMIT' in parts:
D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),
line[3]))
return D
def main():
D = dict_split(dictionary())
for k in D:
D[k] = set(D[k])
fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')
fw.write(''.join(D[k]))
fw.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def openfile(name):
f = open(name, 'r', encoding='utf-8')
text = f.readlines()
f.close()
return text
def makedict(text):
A = []
for line in text:
if 'lex:' in line:
a = []
a.append(line[6:].replace('\n', ''))
elif 'gramm:' in line:
a.append(line[8:].replace('\n', ''))
elif 'trans_ru:' in line:
a.append(line[11:].replace('\n', ''))
A.append(a)
return A
def writefile(name, text):
fw = open(name, 'w', encoding='utf-8')
fw.write(text)
fw.close()
def dictionary():
A = []
for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'
]:
A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))
transl = []
for el in A:
a = []
a.append(convert_input(el[0], 'cyr'))
a += el
transl.append(a)
return transl
def dict_split(transl):
D = {k: [] for k in ['N', 'IMIT', 'V']}
row = '%s\t%s\t%s\t%s\n'
    for line in transl:
parts = []
if line[2] == 'N' or 'ADJ' in line[2]:
parts.append(line[2])
elif 'N-persn' in line[2] or 'N,' in line[2]:
parts.append('N')
elif 'V,' in line[2]:
parts.append('V')
if 'ADV' in line[2]:
parts.append('ADV')
if 'POST' in line[2]:
parts.append('POST')
if 'PRO' in line[2]:
parts.append('PRO')
if 'NUM' in line[2]:
parts.append('NUM')
if 'INTRJ' in line[2]:
parts.append('INTRJ')
if 'CNJ' in line[2]:
parts.append('CNJ')
if 'IMIT' in line[2]:
parts.append('IMIT')
if 'PART' in line[2]:
parts.append('PART')
if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in
parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or
'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):
D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'V' in parts or 'PRAED' in parts:
D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'IMIT' in parts:
D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),
line[3]))
return D
def main():
D = dict_split(dictionary())
for k in D:
D[k] = set(D[k])
fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')
fw.write(''.join(D[k]))
fw.close()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from translit import convert_input
def openfile(name):
f = open(name, 'r', encoding='utf-8')
text = f.readlines()
f.close()
return text
def makedict(text):
A = []
for line in text:
if 'lex:' in line:
a = []
a.append(line[6:].replace('\n', ''))
elif 'gramm:' in line:
a.append(line[8:].replace('\n', ''))
elif 'trans_ru:' in line:
a.append(line[11:].replace('\n', ''))
A.append(a)
return A
def writefile(name, text):
fw = open(name, 'w', encoding='utf-8')
fw.write(text)
fw.close()
def dictionary():
A = []
for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'
]:
A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))
transl = []
for el in A:
a = []
a.append(convert_input(el[0], 'cyr'))
a += el
transl.append(a)
return transl
def dict_split(transl):
D = {k: [] for k in ['N', 'IMIT', 'V']}
row = '%s\t%s\t%s\t%s\n'
    for line in transl:
parts = []
if line[2] == 'N' or 'ADJ' in line[2]:
parts.append(line[2])
elif 'N-persn' in line[2] or 'N,' in line[2]:
parts.append('N')
elif 'V,' in line[2]:
parts.append('V')
if 'ADV' in line[2]:
parts.append('ADV')
if 'POST' in line[2]:
parts.append('POST')
if 'PRO' in line[2]:
parts.append('PRO')
if 'NUM' in line[2]:
parts.append('NUM')
if 'INTRJ' in line[2]:
parts.append('INTRJ')
if 'CNJ' in line[2]:
parts.append('CNJ')
if 'IMIT' in line[2]:
parts.append('IMIT')
if 'PART' in line[2]:
parts.append('PART')
if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in
parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or
'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):
D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'V' in parts or 'PRAED' in parts:
D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'IMIT' in parts:
D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),
line[3]))
return D
def main():
D = dict_split(dictionary())
for k in D:
D[k] = set(D[k])
fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')
fw.write(''.join(D[k]))
fw.close()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from translit import convert_input
def openfile(name):
f = open(name, 'r', encoding = 'utf-8')
text = f.readlines()
f.close()
return text
def makedict(text):
A = []
for line in text:
if 'lex:' in line:
a = []
a.append(line[6:].replace('\n',''))
elif 'gramm:' in line:
a.append(line[8:].replace('\n',''))
elif 'trans_ru:' in line:
a.append(line[11:].replace('\n',''))
A.append(a)
return A
def writefile(name, text):
fw = open(name, 'w', encoding = 'utf-8')
fw.write(text)
fw.close()
#alf = 'абвгдежзийклмнопрстуфхцчшыьёюяӧӝӟӵ'
#trans = list('abvgdežzijklmnoprstufxcčšə')
#trans.append('ə̂')
#trans.append('ə̈əɤ')
def dictionary():
A = []
for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V']:
A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))
transl = []
for el in A:
a = []
a.append(convert_input(el[0], 'cyr'))
a += el
transl.append(a)
return transl
def dict_split(transl):
D = {k:[] for k in ['N', 'IMIT', 'V']}
row = '%s\t%s\t%s\t%s\n'
    for line in transl:
parts = []
if line[2] == 'N' or 'ADJ' in line[2]:
parts.append(line[2])
elif 'N-persn' in line[2] or 'N,' in line[2]:
parts.append('N')
elif 'V,' in line[2]:
parts.append('V')
if 'ADV' in line[2]:
parts.append('ADV')
if 'POST' in line[2]:
parts.append('POST')
if 'PRO' in line[2]:
parts.append('PRO')
if 'NUM' in line[2]:
parts.append('NUM')
if 'INTRJ' in line[2]:
parts.append('INTRJ')
if 'CNJ' in line[2]:
parts.append('CNJ')
if 'IMIT' in line[2]:
parts.append('IMIT')
if 'PART' in line[2]:
parts.append('PART')
if 'N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts:
D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'V' in parts or 'PRAED' in parts:
D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
if 'IMIT' in parts:
D['IMIT'].append(row % (line[0], line[1], ', '.join(parts), line[3]))
return D
def main():
D = dict_split(dictionary())
for k in D:
D[k] = set(D[k])
fw = open('udmlex_' + k + '.tsv', 'w', encoding = 'utf-8')
fw.write(''.join(D[k]))
fw.close()
if __name__ == '__main__':
main()
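The slice offsets in makedict (line[6:], line[8:], line[11:]) imply that each entry in the udm_lexemes_*.txt files lists indented ' lex: ', ' gramm: ' and ' trans_ru: ' fields. The snippet below is an assumed illustration of that layout and of the triple makedict returns for it; it is not an excerpt from the real lexicon files.

# Assumed entry layout, inferred from the slice offsets in makedict above.
sample = [
    ' lex: кар\n',         # ' lex: '      is 6 chars, so line[6:]  keeps the lemma
    ' gramm: N\n',          # ' gramm: '    is 8 chars, so line[8:]  keeps the POS tags
    ' trans_ru: город\n',  # ' trans_ru: ' is 11 chars, so line[11:] keeps the Russian gloss
]
print(makedict(sample))     # -> [['кар', 'N', 'город']]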
order_type: flexible
{
"blob_id": "29e54a9ec0d65965645ac4aabf8c247a8857a25f",
"index": 3778,
"step-1": "<mask token>\n\n\ndef openfile(name):\n f = open(name, 'r', encoding='utf-8')\n text = f.readlines()\n f.close()\n return text\n\n\ndef makedict(text):\n A = []\n for line in text:\n if 'lex:' in line:\n a = []\n a.append(line[6:].replace('\\n', ''))\n elif 'gramm:' in line:\n a.append(line[8:].replace('\\n', ''))\n elif 'trans_ru:' in line:\n a.append(line[11:].replace('\\n', ''))\n A.append(a)\n return A\n\n\n<mask token>\n\n\ndef dictionary():\n A = []\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'\n ]:\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\n transl = []\n for el in A:\n a = []\n a.append(convert_input(el[0], 'cyr'))\n a += el\n transl.append(a)\n return transl\n\n\ndef dict_split(transl):\n D = {k: [] for k in ['N', 'IMIT', 'V']}\n row = '%s\\t%s\\t%s\\t%s\\n'\n for line in dictionary():\n parts = []\n if line[2] == 'N' or 'ADJ' in line[2]:\n parts.append(line[2])\n elif 'N-persn' in line[2] or 'N,' in line[2]:\n parts.append('N')\n elif 'V,' in line[2]:\n parts.append('V')\n if 'ADV' in line[2]:\n parts.append('ADV')\n if 'POST' in line[2]:\n parts.append('POST')\n if 'PRO' in line[2]:\n parts.append('PRO')\n if 'NUM' in line[2]:\n parts.append('NUM')\n if 'INTRJ' in line[2]:\n parts.append('INTRJ')\n if 'CNJ' in line[2]:\n parts.append('CNJ')\n if 'IMIT' in line[2]:\n parts.append('IMIT')\n if 'PART' in line[2]:\n parts.append('PART')\n if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in\n parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or\n 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'V' in parts or 'PRAED' in parts:\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'IMIT' in parts:\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),\n line[3]))\n return D\n\n\ndef main():\n D = dict_split(dictionary())\n for k in D:\n D[k] = set(D[k])\n fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')\n fw.write(''.join(D[k]))\n fw.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef openfile(name):\n f = open(name, 'r', encoding='utf-8')\n text = f.readlines()\n f.close()\n return text\n\n\ndef makedict(text):\n A = []\n for line in text:\n if 'lex:' in line:\n a = []\n a.append(line[6:].replace('\\n', ''))\n elif 'gramm:' in line:\n a.append(line[8:].replace('\\n', ''))\n elif 'trans_ru:' in line:\n a.append(line[11:].replace('\\n', ''))\n A.append(a)\n return A\n\n\ndef writefile(name, text):\n fw = open(name, 'w', encoding='utf-8')\n fw.write(text)\n fw.close()\n\n\ndef dictionary():\n A = []\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'\n ]:\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\n transl = []\n for el in A:\n a = []\n a.append(convert_input(el[0], 'cyr'))\n a += el\n transl.append(a)\n return transl\n\n\ndef dict_split(transl):\n D = {k: [] for k in ['N', 'IMIT', 'V']}\n row = '%s\\t%s\\t%s\\t%s\\n'\n for line in dictionary():\n parts = []\n if line[2] == 'N' or 'ADJ' in line[2]:\n parts.append(line[2])\n elif 'N-persn' in line[2] or 'N,' in line[2]:\n parts.append('N')\n elif 'V,' in line[2]:\n parts.append('V')\n if 'ADV' in line[2]:\n parts.append('ADV')\n if 'POST' in line[2]:\n parts.append('POST')\n if 'PRO' in line[2]:\n parts.append('PRO')\n if 'NUM' in line[2]:\n parts.append('NUM')\n if 'INTRJ' in line[2]:\n parts.append('INTRJ')\n if 'CNJ' in line[2]:\n parts.append('CNJ')\n if 'IMIT' in line[2]:\n parts.append('IMIT')\n if 'PART' in line[2]:\n parts.append('PART')\n if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in\n parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or\n 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'V' in parts or 'PRAED' in parts:\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'IMIT' in parts:\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),\n line[3]))\n return D\n\n\ndef main():\n D = dict_split(dictionary())\n for k in D:\n D[k] = set(D[k])\n fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')\n fw.write(''.join(D[k]))\n fw.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef openfile(name):\n f = open(name, 'r', encoding='utf-8')\n text = f.readlines()\n f.close()\n return text\n\n\ndef makedict(text):\n A = []\n for line in text:\n if 'lex:' in line:\n a = []\n a.append(line[6:].replace('\\n', ''))\n elif 'gramm:' in line:\n a.append(line[8:].replace('\\n', ''))\n elif 'trans_ru:' in line:\n a.append(line[11:].replace('\\n', ''))\n A.append(a)\n return A\n\n\ndef writefile(name, text):\n fw = open(name, 'w', encoding='utf-8')\n fw.write(text)\n fw.close()\n\n\ndef dictionary():\n A = []\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'\n ]:\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\n transl = []\n for el in A:\n a = []\n a.append(convert_input(el[0], 'cyr'))\n a += el\n transl.append(a)\n return transl\n\n\ndef dict_split(transl):\n D = {k: [] for k in ['N', 'IMIT', 'V']}\n row = '%s\\t%s\\t%s\\t%s\\n'\n for line in dictionary():\n parts = []\n if line[2] == 'N' or 'ADJ' in line[2]:\n parts.append(line[2])\n elif 'N-persn' in line[2] or 'N,' in line[2]:\n parts.append('N')\n elif 'V,' in line[2]:\n parts.append('V')\n if 'ADV' in line[2]:\n parts.append('ADV')\n if 'POST' in line[2]:\n parts.append('POST')\n if 'PRO' in line[2]:\n parts.append('PRO')\n if 'NUM' in line[2]:\n parts.append('NUM')\n if 'INTRJ' in line[2]:\n parts.append('INTRJ')\n if 'CNJ' in line[2]:\n parts.append('CNJ')\n if 'IMIT' in line[2]:\n parts.append('IMIT')\n if 'PART' in line[2]:\n parts.append('PART')\n if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in\n parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or\n 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'V' in parts or 'PRAED' in parts:\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'IMIT' in parts:\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),\n line[3]))\n return D\n\n\ndef main():\n D = dict_split(dictionary())\n for k in D:\n D[k] = set(D[k])\n fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')\n fw.write(''.join(D[k]))\n fw.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from translit import convert_input\n\n\ndef openfile(name):\n f = open(name, 'r', encoding='utf-8')\n text = f.readlines()\n f.close()\n return text\n\n\ndef makedict(text):\n A = []\n for line in text:\n if 'lex:' in line:\n a = []\n a.append(line[6:].replace('\\n', ''))\n elif 'gramm:' in line:\n a.append(line[8:].replace('\\n', ''))\n elif 'trans_ru:' in line:\n a.append(line[11:].replace('\\n', ''))\n A.append(a)\n return A\n\n\ndef writefile(name, text):\n fw = open(name, 'w', encoding='utf-8')\n fw.write(text)\n fw.close()\n\n\ndef dictionary():\n A = []\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V'\n ]:\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\n transl = []\n for el in A:\n a = []\n a.append(convert_input(el[0], 'cyr'))\n a += el\n transl.append(a)\n return transl\n\n\ndef dict_split(transl):\n D = {k: [] for k in ['N', 'IMIT', 'V']}\n row = '%s\\t%s\\t%s\\t%s\\n'\n for line in dictionary():\n parts = []\n if line[2] == 'N' or 'ADJ' in line[2]:\n parts.append(line[2])\n elif 'N-persn' in line[2] or 'N,' in line[2]:\n parts.append('N')\n elif 'V,' in line[2]:\n parts.append('V')\n if 'ADV' in line[2]:\n parts.append('ADV')\n if 'POST' in line[2]:\n parts.append('POST')\n if 'PRO' in line[2]:\n parts.append('PRO')\n if 'NUM' in line[2]:\n parts.append('NUM')\n if 'INTRJ' in line[2]:\n parts.append('INTRJ')\n if 'CNJ' in line[2]:\n parts.append('CNJ')\n if 'IMIT' in line[2]:\n parts.append('IMIT')\n if 'PART' in line[2]:\n parts.append('PART')\n if ('N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in\n parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or\n 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts):\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'V' in parts or 'PRAED' in parts:\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\n if 'IMIT' in parts:\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts),\n line[3]))\n return D\n\n\ndef main():\n D = dict_split(dictionary())\n for k in D:\n D[k] = set(D[k])\n fw = open('udmlex_' + k + '.tsv', 'w', encoding='utf-8')\n fw.write(''.join(D[k]))\n fw.close()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from translit import convert_input\r\n\r\ndef openfile(name):\r\n f = open(name, 'r', encoding = 'utf-8')\r\n text = f.readlines()\r\n f.close()\r\n return text\r\n\r\ndef makedict(text):\r\n A = []\r\n for line in text:\r\n if 'lex:' in line:\r\n a = []\r\n a.append(line[6:].replace('\\n',''))\r\n elif 'gramm:' in line:\r\n a.append(line[8:].replace('\\n',''))\r\n elif 'trans_ru:' in line:\r\n a.append(line[11:].replace('\\n',''))\r\n A.append(a)\r\n return A\r\n\r\ndef writefile(name, text):\r\n fw = open(name, 'w', encoding = 'utf-8')\r\n fw.write(text) \r\n fw.close()\r\n\r\n#alf = 'абвгдежзийклмнопрстуфхцчшыьёюяӧӝӟӵ'\r\n#trans = list('abvgdežzijklmnoprstufxcčšə')\r\n#trans.append('ə̂')\r\n#trans.append('ə̈əɤ')\r\n\r\ndef dictionary():\r\n A = []\r\n for i in ['ADJ', 'IMIT', 'N', 'N_persn', 'NRel', 'PRO', 'unchangeable', 'V']:\r\n A += makedict(openfile('udm_lexemes_{}.txt'.format(i)))\r\n transl = []\r\n for el in A:\r\n a = []\r\n a.append(convert_input(el[0], 'cyr'))\r\n a += el\r\n transl.append(a)\r\n return transl\r\n\r\ndef dict_split(transl):\r\n D = {k:[] for k in ['N', 'IMIT', 'V']}\r\n row = '%s\\t%s\\t%s\\t%s\\n'\r\n for line in dictionary():\r\n parts = []\r\n if line[2] == 'N' or 'ADJ' in line[2]:\r\n parts.append(line[2])\r\n elif 'N-persn' in line[2] or 'N,' in line[2]:\r\n parts.append('N')\r\n elif 'V,' in line[2]: \r\n parts.append('V')\r\n if 'ADV' in line[2]:\r\n parts.append('ADV')\r\n if 'POST' in line[2]:\r\n parts.append('POST')\r\n if 'PRO' in line[2]:\r\n parts.append('PRO')\r\n if 'NUM' in line[2]:\r\n parts.append('NUM')\r\n if 'INTRJ' in line[2]:\r\n parts.append('INTRJ')\r\n if 'CNJ' in line[2]:\r\n parts.append('CNJ')\r\n if 'IMIT' in line[2]:\r\n parts.append('IMIT')\r\n if 'PART' in line[2]:\r\n parts.append('PART')\r\n if 'N' in parts or 'ADJ' in parts or 'ADV' in parts or 'POST' in parts or 'PRO' in parts or 'NUM' in parts or 'PRAED' in parts or 'INTRJ' in parts or 'CNJ' in parts or 'PART' in parts:\r\n D['N'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\r\n if 'V' in parts or 'PRAED' in parts:\r\n D['V'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\r\n if 'IMIT' in parts:\r\n D['IMIT'].append(row % (line[0], line[1], ', '.join(parts), line[3]))\r\n return D\r\n\r\ndef main():\r\n D = dict_split(dictionary()) \r\n for k in D:\r\n D[k] = set(D[k])\r\n fw = open('udmlex_' + k + '.tsv', 'w', encoding = 'utf-8')\r\n fw.write(''.join(D[k]))\r\n fw.close()\r\n\r\nif __name__ == '__main__':\r\n main()\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from fieldsets import getSingleField, SortAsc
from sqlalchemy import func
from ladderdb import ElementNotFoundException, EmptyRankingListException
from db_entities import Player, Result
from bottle import route,request
from globe import db,env
@route('/player')
def output( ):
player_name = getSingleField( 'player', request )
order = getSingleField( 'order', request , 'nick')
ladder_id = getSingleField( 'ladder', request )
try:
s = db.sessionmaker()
if player_name:
player = db.GetPlayer( player_name )
ladders = db.GetLadderByPlayer( player.id )
played = dict()
positions = dict()
for ladder in ladders:
positions[ladder.id] = db.GetPlayerPosition( ladder.id, player.id )
played[ladder.id] = s.query( Result.id ).filter( Result.ladder_id == ladder.id ).filter( Result.player_id == player.id ).count()
results = s.query( Result ).filter( Result.player_id == player.id).order_by(Result.date.desc())[0:5]
matches = []
for r in results:
matches.append( r.match )
template = env.get_template('viewplayer.html')
s.close()
return template.render(player=player,ladders=ladders, positions=positions,played=played,matches=matches )
else:
asc = getSingleField( 'asc', request, 'False' )
if not asc:
asc = 'False'
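			# list view: pair each Player with the number of Result rows they have recorded
			# ('played'), keeping only players that appear in at least one ladder result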
q = s.query( Player, func.count(Result.id).label('played')).outerjoin( (Result, Result.player_id == Player.id ) )\
.filter( Player.id.in_(s.query( Result.player_id ).filter( Player.id == Result.player_id ) ) ) \
.filter( Result.player_id == Player.id ).group_by( Player.id )
if ladder_id:
q = q.filter( Player.id.in_( s.query( Result.player_id ).filter( Result.ladder_id == ladder_id ) ) )
if order == 'nick':
q = q.order_by( SortAsc( Player.nick, asc ) )
elif order == 'id' :
q = q.order_by( SortAsc( Player.id, asc ) )
else:
order = 'played'
q = q.order_by( SortAsc( func.count(Result.id), asc ) )
limit = int(getSingleField( 'limit', request, q.count() ))
offset = int(getSingleField( 'offset', request, 0 ))
players = q[offset:offset+limit-1]
template = env.get_template('viewplayerlist.html')
s.close()
return template.render(players=players,offset=offset,limit=limit,order=order,asc=asc )
except ElementNotFoundException, e:
err_msg="player %s not found"%(str(player_name))
except EmptyRankingListException, m:
err_msg=(str(m))
if s:
s.close()
template = env.get_template('error.html')
return template.render( err_msg=err_msg )
|
normal
|
{
"blob_id": "97d128694709c4fe0d9ec2b2749d8e4ec5df7322",
"index": 8812,
"step-1": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom fieldsets import getSingleField, SortAsc\nfrom sqlalchemy import func\nfrom ladderdb import ElementNotFoundException, EmptyRankingListException\nfrom db_entities import Player, Result\nfrom bottle import route,request\nfrom globe import db,env\n\n@route('/player')\ndef output( ):\n\tplayer_name = getSingleField( 'player', request )\n\torder = getSingleField( 'order', request , 'nick')\n\tladder_id = getSingleField( 'ladder', request )\n\ttry:\n\t\ts = db.sessionmaker()\n\t\tif player_name:\n\t\t\tplayer = db.GetPlayer( player_name )\n\t\t\tladders = db.GetLadderByPlayer( player.id )\n\t\t\tplayed = dict()\n\t\t\tpositions = dict()\n\t\t\tfor ladder in ladders:\n\t\t\t\tpositions[ladder.id] = db.GetPlayerPosition( ladder.id, player.id )\n\t\t\t\tplayed[ladder.id] = s.query( Result.id ).filter( Result.ladder_id == ladder.id ).filter( Result.player_id == player.id ).count()\n\n\t\t\tresults = s.query( Result ).filter( Result.player_id == player.id).order_by(Result.date.desc())[0:5]\n\t\t\tmatches = []\n\t\t\tfor r in results:\n\t\t\t\tmatches.append( r.match )\n\n\t\t\ttemplate = env.get_template('viewplayer.html')\n\t\t\ts.close()\n\t\t\treturn template.render(player=player,ladders=ladders, positions=positions,played=played,matches=matches )\n\t\telse:\n\t\t\tasc = getSingleField( 'asc', request, 'False' )\n\t\t\tif not asc:\n\t\t\t\tasc = 'False'\n\t\t\tq = s.query( Player, func.count(Result.id).label('played')).outerjoin( (Result, Result.player_id == Player.id ) )\\\n\t\t\t\t.filter( Player.id.in_(s.query( Result.player_id ).filter( Player.id == Result.player_id ) ) ) \\\n\t\t\t\t.filter( Result.player_id == Player.id ).group_by( Player.id )\n\t\t\tif ladder_id:\n\t\t\t\tq = q.filter( Player.id.in_( s.query( Result.player_id ).filter( Result.ladder_id == ladder_id ) ) )\n\t\t\tif order == 'nick':\n\t\t\t\tq = q.order_by( SortAsc( Player.nick, asc ) )\n\t\t\telif order == 'id' :\n\t\t\t\tq = q.order_by( SortAsc( Player.id, asc ) )\n\t\t\telse:\n\t\t\t\torder = 'played'\n\t\t\t\tq = q.order_by( SortAsc( func.count(Result.id), asc ) )\n\n\t\t\tlimit = int(getSingleField( 'limit', request, q.count() ))\n\t\t\toffset = int(getSingleField( 'offset', request, 0 ))\n\t\t\tplayers = q[offset:offset+limit-1]\n\t\t\ttemplate = env.get_template('viewplayerlist.html')\n\t\t\ts.close()\n\t\t\treturn template.render(players=players,offset=offset,limit=limit,order=order,asc=asc )\n\n\texcept ElementNotFoundException, e:\n\t\terr_msg=\"player %s not found\"%(str(player_name))\n\n\texcept EmptyRankingListException, m:\n\t\terr_msg=(str(m))\n\tif s:\n\t\ts.close()\n\ttemplate = env.get_template('error.html')\n\treturn template.render( err_msg=err_msg )",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Flask, flash, abort, redirect, url_for, request, render_template, make_response, json, Response
import os, sys
import config
import boto.ec2.elb
import boto
from boto.ec2 import *
app = Flask(__name__)
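# Index view: for every configured region, gather availability zones plus counts of
# instances, EBS volumes, elastic IPs and load balancers, and note how many volumes/IPs
# are unattached and how many instances have scheduled events.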
@app.route('/')
def index():
list = []
creds = config.get_ec2_conf()
for region in config.region_list():
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
zones = conn.get_all_zones()
instances = conn.get_all_instance_status()
instance_count = len(instances)
ebs = conn.get_all_volumes()
ebscount = len(ebs)
unattached_ebs = 0
unattached_eli = 0
event_count = 0
for instance in instances:
events = instance.events
if events:
event_count = event_count + 1
for vol in ebs:
state = vol.attachment_state()
if state == None:
unattached_ebs = unattached_ebs + 1
elis = conn.get_all_addresses()
eli_count = len(elis)
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
unattached_eli = unattached_eli + 1
connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=
creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elb = connelb.get_all_load_balancers()
elb_count = len(elb)
list.append({'region': region, 'zones': zones, 'instance_count':
instance_count, 'ebscount': ebscount, 'unattached_ebs':
unattached_ebs, 'eli_count': eli_count, 'unattached_eli':
unattached_eli, 'elb_count': elb_count, 'event_count': event_count}
)
return render_template('index.html', list=list)
@app.route('/ebs_volumes/<region>/')
def ebs_volumes(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ebs = conn.get_all_volumes()
ebs_vol = []
for vol in ebs:
state = vol.attachment_state()
if state == None:
ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,
'status': vol.status}
ebs_vol.append(ebs_info)
return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)
@app.route('/ebs_volumes/<region>/delete/<vol_id>')
def delete_ebs_vol(region=None, vol_id=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
vol_id = vol_id.encode('ascii')
vol_ids = conn.get_all_volumes(volume_ids=vol_id)
for vol in vol_ids:
vol.delete()
return redirect(url_for('ebs_volumes', region=region))
@app.route('/elastic_ips/<region>/')
def elastic_ips(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
elis = conn.get_all_addresses()
un_eli = []
for eli in elis:
instance_id = eli.instance_id
if not instance_id:
eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}
un_eli.append(eli_info)
return render_template('elastic_ip.html', un_eli=un_eli, region=region)
@app.route('/elastic_ips/<region>/delete/<ip>')
def delete_elastic_ip(region=None, ip=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
ip = ip.encode('ascii')
elis = conn.get_all_addresses(addresses=ip)
for eli in elis:
eli.release()
return redirect(url_for('elastic_ips', region=region))
@app.route('/instance_events/<region>/')
def instance_events(region=None):
creds = config.get_ec2_conf()
conn = connect_to_region(region, aws_access_key_id=creds[
'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[
'AWS_SECRET_ACCESS_KEY'])
instances = conn.get_all_instance_status()
instance_event_list = []
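    # NOTE: only the first scheduled event of each instance is surfaced below
    # (instance.events[0]); an instance may in principle carry several events.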
for instance in instances:
event = instance.events
if event:
event_info = {'instance_id': instance.id, 'event': instance.
events[0].code, 'description': instance.events[0].
description, 'event_before': instance.events[0].not_before,
'event_after': instance.events[0].not_after}
instance_event_list.append(event_info)
return render_template('instance_events.html', instance_event_list=
instance_event_list)
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
|
normal
|
{
"blob_id": "22c2425f1dc14b6b0005ebf2231af8abf43aa2e1",
"index": 5273,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\[email protected]('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)\n\n\[email protected]('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\[email protected]('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\[email protected]('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\[email protected]('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = 
connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\[email protected]('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)\n\n\[email protected]('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\[email protected]('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\[email protected]('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\[email protected]('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = 
connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\[email protected]('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)\n\n\[email protected]('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\[email protected]('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\[email protected]('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\[email protected]('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = 
config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n",
"step-4": "from flask import Flask, flash, abort, redirect, url_for, request, render_template, make_response, json, Response\nimport os, sys\nimport config\nimport boto.ec2.elb\nimport boto\nfrom boto.ec2 import *\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n list = []\n creds = config.get_ec2_conf()\n for region in config.region_list():\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n zones = conn.get_all_zones()\n instances = conn.get_all_instance_status()\n instance_count = len(instances)\n ebs = conn.get_all_volumes()\n ebscount = len(ebs)\n unattached_ebs = 0\n unattached_eli = 0\n event_count = 0\n for instance in instances:\n events = instance.events\n if events:\n event_count = event_count + 1\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n unattached_ebs = unattached_ebs + 1\n elis = conn.get_all_addresses()\n eli_count = len(elis)\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n unattached_eli = unattached_eli + 1\n connelb = boto.ec2.elb.connect_to_region(region, aws_access_key_id=\n creds['AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elb = connelb.get_all_load_balancers()\n elb_count = len(elb)\n list.append({'region': region, 'zones': zones, 'instance_count':\n instance_count, 'ebscount': ebscount, 'unattached_ebs':\n unattached_ebs, 'eli_count': eli_count, 'unattached_eli':\n unattached_eli, 'elb_count': elb_count, 'event_count': event_count}\n )\n return render_template('index.html', list=list)\n\n\[email protected]('/ebs_volumes/<region>/')\ndef ebs_volumes(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ebs = conn.get_all_volumes()\n ebs_vol = []\n for vol in ebs:\n state = vol.attachment_state()\n if state == None:\n ebs_info = {'id': vol.id, 'size': vol.size, 'iops': vol.iops,\n 'status': vol.status}\n ebs_vol.append(ebs_info)\n return render_template('ebs_volume.html', ebs_vol=ebs_vol, region=region)\n\n\[email protected]('/ebs_volumes/<region>/delete/<vol_id>')\ndef delete_ebs_vol(region=None, vol_id=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n vol_id = vol_id.encode('ascii')\n vol_ids = conn.get_all_volumes(volume_ids=vol_id)\n for vol in vol_ids:\n vol.delete()\n return redirect(url_for('ebs_volumes', region=region))\n\n\[email protected]('/elastic_ips/<region>/')\ndef elastic_ips(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n elis = conn.get_all_addresses()\n un_eli = []\n for eli in elis:\n instance_id = eli.instance_id\n if not instance_id:\n eli_info = {'public_ip': eli.public_ip, 'domain': eli.domain}\n un_eli.append(eli_info)\n return render_template('elastic_ip.html', un_eli=un_eli, region=region)\n\n\[email protected]('/elastic_ips/<region>/delete/<ip>')\ndef delete_elastic_ip(region=None, ip=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n ip = ip.encode('ascii')\n elis = conn.get_all_addresses(addresses=ip)\n for eli 
in elis:\n eli.release()\n return redirect(url_for('elastic_ips', region=region))\n\n\[email protected]('/instance_events/<region>/')\ndef instance_events(region=None):\n creds = config.get_ec2_conf()\n conn = connect_to_region(region, aws_access_key_id=creds[\n 'AWS_ACCESS_KEY_ID'], aws_secret_access_key=creds[\n 'AWS_SECRET_ACCESS_KEY'])\n instances = conn.get_all_instance_status()\n instance_event_list = []\n for instance in instances:\n event = instance.events\n if event:\n event_info = {'instance_id': instance.id, 'event': instance.\n events[0].code, 'description': instance.events[0].\n description, 'event_before': instance.events[0].not_before,\n 'event_after': instance.events[0].not_after}\n instance_event_list.append(event_info)\n return render_template('instance_events.html', instance_event_list=\n instance_event_list)\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n",
"step-5": null,
"step-ids": [
6,
7,
8,
9
]
}
|
[
6,
7,
8,
9
] |
# -*- coding: utf-8 -*-
from .base import BaseSchema
from marshmallow import fields
class BaseTickSchema(BaseSchema):
"""
    Time : tick time
    High : highest price
    Low : lowest price
    Volume : traded volume
    Last : latest price
"""
Time = fields.String()
High = fields.String()
Low = fields.String()
Volume = fields.String()
Last = fields.String()
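# Illustrative usage sketch (not part of the original module): deserialising one raw
# tick dict with the schema above. The exact return shape of load() depends on the
# marshmallow version and on BaseSchema, which is defined elsewhere.
# tick = BaseTickSchema().load({'Time': '2021-05-03 09:30:00', 'High': '101.5',
#                               'Low': '99.0', 'Volume': '1200', 'Last': '100.2'})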
|
normal
|
{
"blob_id": "6cc23a3e2fa3b1baddf05b30a1054a7faf0371a6",
"index": 5528,
"step-1": "<mask token>\n\n\nclass BaseTickSchema(BaseSchema):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseTickSchema(BaseSchema):\n <mask token>\n Time = fields.String()\n High = fields.String()\n Low = fields.String()\n Volume = fields.String()\n Last = fields.String()\n",
"step-3": "<mask token>\n\n\nclass BaseTickSchema(BaseSchema):\n \"\"\"\n Time : 时间\n High : 最高价\n Low : 最低价\n Volume : 交易量\n Last : 最新价\n \"\"\"\n Time = fields.String()\n High = fields.String()\n Low = fields.String()\n Volume = fields.String()\n Last = fields.String()\n",
"step-4": "from .base import BaseSchema\nfrom marshmallow import fields\n\n\nclass BaseTickSchema(BaseSchema):\n \"\"\"\n Time : 时间\n High : 最高价\n Low : 最低价\n Volume : 交易量\n Last : 最新价\n \"\"\"\n Time = fields.String()\n High = fields.String()\n Low = fields.String()\n Volume = fields.String()\n Last = fields.String()\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom .base import BaseSchema\nfrom marshmallow import fields\n\n\nclass BaseTickSchema(BaseSchema):\n \"\"\"\n Time : 时间\n High : 最高价\n Low : 最低价\n Volume : 交易量\n Last : 最新价\n \"\"\"\n\n Time = fields.String()\n High = fields.String()\n Low = fields.String()\n Volume = fields.String()\n Last = fields.String()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# coding: utf-8
# # PyCity School Analysis
# 1. Charter school types show better performance than District school types across all score metrics.
# 2. Overall, students perform better in reading (80 to 84%) than in math (76 to 84%).
# ### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# In[1]:
# Dependencies and Setup
import pandas as pd
import numpy as np
# File to Load (Remember to Change These)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"
# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)
# Combine the data into a single dataset
school_data_complete = pd.merge(student_data, school_data, how="left", on=["school_name", "school_name"])
# ## District Summary
#
# * Calculate the total number of schools
#
# * Calculate the total number of students
#
# * Calculate the total budget
#
# * Calculate the average math score
#
# * Calculate the average reading score
#
# * Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2
#
# * Calculate the percentage of students with a passing math score (70 or greater)
#
# * Calculate the percentage of students with a passing reading score (70 or greater)
#
# * Create a dataframe to hold the above results
#
# * Optional: give the displayed data cleaner formatting
# In[2]:
#Calculate the total number of schools
total_schools = len(school_data)
#Calculate the total number of students
total_students = len(student_data)
#Calculate the total budget
total_buget = school_data['budget'].sum()
#Calculate the average math score
avg_math_score = student_data['math_score'].mean()
#Calculate the average reading score
avg_reading_score = student_data['reading_score'].mean()
#Calculate the overall passing rate (overall average score)
overall_avg_score = ((avg_math_score + avg_reading_score)/2)
#Calculate the percentage of students with a passing math score (70 or greater)
passsing_math_score = (student_data['math_score'] >= 70).sum()
percent_math_passing = (passsing_math_score/len(student_data['math_score']))*100
#Calculate the percentage of students with a passing reading score (70 or greater)
passsing_reading_score = (student_data['reading_score'] >= 70).sum()
percent_reading_passing = (passsing_reading_score/len(student_data['reading_score']))*100
#Create a dataframe to hold the above results
District_Summary_df = pd.DataFrame({'Total Schools' : [total_schools], 'Total Students' : [total_students], 'Total Budget' :[total_buget], 'Average Math Score' : [avg_math_score], 'Average Reading Score':[avg_reading_score], '% Passing Math' : [percent_math_passing], '% Passing Reading' : [percent_reading_passing], '% Overall Passing Rate' : [overall_avg_score]})
District_Summary_df
# ## School Summary
# * Create an overview table that summarizes key metrics about each school, including:
# * School Name
# * School Type
# * Total Students
# * Total School Budget
# * Per Student Budget
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
#
# * Create a dataframe to hold the above results
# ## Top Performing Schools (By Passing Rate)
# * Sort and display the top five schools in overall passing rate
# In[3]:
#group by School Name
school_groups = school_data_complete.set_index('school_name').groupby(['school_name'])
#find School type
school_type = school_data.set_index('school_name')['type']
#Calculate total students in each school
total_student = school_groups['Student ID'].count()
#Calculate total budget in each school
school_total_budget = school_data.set_index('school_name')['budget']
#Calculate budget per student in each school
per_stu_budget = school_total_budget/school_data.set_index('school_name')['size']
#Calculate average math score
total_math_score = school_data_complete.groupby(['school_name'])['math_score'].sum()
avg_math = total_math_score/total_student
#Calculate average reading score
total_reading_score = school_data_complete.groupby(['school_name'])['reading_score'].sum()
avg_reading = total_reading_score/total_student
#Calculate math score >= 70
pass_math_score = school_data_complete[school_data_complete['math_score'] >= 70].groupby('school_name')['math_score'].count()
pass_math_percent = (pass_math_score/total_student)*100
#Calculate reading score >= 70
pass_reading_score = school_data_complete[school_data_complete['reading_score'] >= 70].groupby('school_name')['reading_score'].count()
pass_reading_percent = (pass_reading_score/total_student)*100
#Calculate overall passing rate
overall_passing_rate = (pass_math_percent + pass_reading_percent)/2
#Adding all the calculated columns in dataframe
school_summary_df = pd.DataFrame({'School Type' : school_type, 'Total Students' : total_student, 'Total School Budget' : school_total_budget, 'Per Student Budget' : per_stu_budget, 'Average Math Score' : avg_math, 'Average Reading Score' : avg_reading, '% Passing Math' : pass_math_percent, '% Passing Reading' : pass_reading_percent, '% Overall Passing Rate' : overall_passing_rate})
school_summary_df
#Sort and display the top five schools in overall passing rate
top_performing = school_summary_df.sort_values('% Overall Passing Rate', ascending = False)
top_performing.head()
# ## Bottom Performing Schools (By Passing Rate)
# * Sort and display the five worst-performing schools
# In[4]:
#Sort and display the five worst-performing schools
top_performing = school_summary_df.sort_values('% Overall Passing Rate')
top_performing.head()
# ## Math Scores by Grade
# * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
#
# * Create a pandas series for each grade. Hint: use a conditional statement.
#
# * Group each series by school
#
# * Combine the series into a dataframe
#
# * Optional: give the displayed data cleaner formatting
# In[5]:
#Create dataframe to hold average math score
grade_math_score = pd.DataFrame()
#Calculate average math score for 9th
grade_math_score['9th'] = school_data_complete[school_data_complete['grade'] == '9th'].groupby('school_name')['math_score'].mean()
#Calculate average math score for 10th
grade_math_score['10th'] = school_data_complete[school_data_complete['grade'] == '10th'].groupby('school_name')['math_score'].mean()
#Calculate average math score for 11th
grade_math_score['11th'] = school_data_complete[school_data_complete['grade'] == '11th'].groupby('school_name')['math_score'].mean()
#Calculate average math score for 12th
grade_math_score['12th'] = school_data_complete[school_data_complete['grade'] == '12th'].groupby('school_name')['math_score'].mean()
#formatting by setting index name blank
grade_math_score.index.name = ''
grade_math_score
# ## Reading Score by Grade
# * Perform the same operations as above for reading scores
# In[6]:
#Create dataframe to hold average reading score
grade_reading_score = pd.DataFrame()
#Calculate average reading score for 9th
grade_reading_score['9th'] = school_data_complete[school_data_complete['grade'] == '9th'].groupby('school_name')['reading_score'].mean()
#Calculate average reading score for 10th
grade_reading_score['10th'] = school_data_complete[school_data_complete['grade'] == '10th'].groupby('school_name')['reading_score'].mean()
#Calculate average reading score for 11th
grade_reading_score['11th'] = school_data_complete[school_data_complete['grade'] == '11th'].groupby('school_name')['reading_score'].mean()
#Calculate average reading score for 12th
grade_reading_score['12th'] = school_data_complete[school_data_complete['grade'] == '12th'].groupby('school_name')['reading_score'].mean()
#formatting by setting index name blank
grade_reading_score.index.name = ''
grade_reading_score
# ## Scores by School Spending
# * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
# In[7]:
# Sample bins. Feel free to create your own bins.
spending_bins = [0, 585, 615, 645, 675]
group_names = ["<$585", "$585-615", "$615-645", "$645-675"]
# In[8]:
# create dataframe with needed columns
school_spending_ranges = school_summary_df.loc[:, ['Average Math Score',
'Average Reading Score','% Passing Math',
'% Passing Reading','% Overall Passing Rate']]
#Calculate average score based on spending_bins
school_spending_ranges['Spending Ranges (Per Student)'] = pd.cut(school_summary_df['Per Student Budget'], spending_bins, labels = group_names)
school_spending_ranges = school_spending_ranges.groupby('Spending Ranges (Per Student)').mean()
school_spending_ranges
# ## Scores by School Size
# * Perform the same operations as above, based on school size.
# In[9]:
# Sample bins. Feel free to create your own bins.
size_bins = [0, 1000, 2000, 5000]
group_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# In[10]:
# create dataframe with needed columns
school_size_score = school_summary_df.loc[:, ['Average Math Score',
'Average Reading Score','% Passing Math',
'% Passing Reading','% Overall Passing Rate']]
#Calculate average score as per size_bins
school_size_score['School Size'] = pd.cut(school_summary_df['Total Students'], size_bins, labels = group_names)
school_size_score = school_size_score.groupby('School Size').mean()
school_size_score
# ## Scores by School Type
# * Perform the same operations as above, based on school type.
# In[11]:
# create dataframe with needed columns
scores_School_type = school_summary_df[['School Type','Average Math Score',
'Average Reading Score','% Passing Math',
'% Passing Reading','% Overall Passing Rate',]]
#create a group based on school type
scores_School_type = scores_School_type.groupby('School Type').mean()
scores_School_type
# In[ ]:
|
normal
|
{
"blob_id": "8488fdd216c30c3cb4b0060305af6708d890bc86",
"index": 8203,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nDistrict_Summary_df\n<mask token>\nschool_summary_df\n<mask token>\ntop_performing.head()\n<mask token>\ntop_performing.head()\n<mask token>\ngrade_math_score\n<mask token>\ngrade_reading_score\n<mask token>\nschool_spending_ranges\n<mask token>\nschool_size_score\n<mask token>\nscores_School_type\n",
"step-3": "<mask token>\nschool_data_to_load = 'Resources/schools_complete.csv'\nstudent_data_to_load = 'Resources/students_complete.csv'\nschool_data = pd.read_csv(school_data_to_load)\nstudent_data = pd.read_csv(student_data_to_load)\nschool_data_complete = pd.merge(student_data, school_data, how='left', on=[\n 'school_name', 'school_name'])\ntotal_schools = len(school_data)\ntotal_students = len(student_data)\ntotal_buget = school_data['budget'].sum()\navg_math_score = student_data['math_score'].mean()\navg_reading_score = student_data['reading_score'].mean()\noverall_avg_score = (avg_math_score + avg_reading_score) / 2\npasssing_math_score = (student_data['math_score'] >= 70).sum()\npercent_math_passing = passsing_math_score / len(student_data['math_score']\n ) * 100\npasssing_reading_score = (student_data['reading_score'] >= 70).sum()\npercent_reading_passing = passsing_reading_score / len(student_data[\n 'reading_score']) * 100\nDistrict_Summary_df = pd.DataFrame({'Total Schools': [total_schools],\n 'Total Students': [total_students], 'Total Budget': [total_buget],\n 'Average Math Score': [avg_math_score], 'Average Reading Score': [\n avg_reading_score], '% Passing Math': [percent_math_passing],\n '% Passing Reading': [percent_reading_passing],\n '% Overall Passing Rate': [overall_avg_score]})\nDistrict_Summary_df\nschool_groups = school_data_complete.set_index('school_name').groupby([\n 'school_name'])\nschool_type = school_data.set_index('school_name')['type']\ntotal_student = school_groups['Student ID'].count()\nschool_total_budget = school_data.set_index('school_name')['budget']\nper_stu_budget = school_total_budget / school_data.set_index('school_name')[\n 'size']\ntotal_math_score = school_data_complete.groupby(['school_name'])['math_score'\n ].sum()\navg_math = total_math_score / total_student\ntotal_reading_score = school_data_complete.groupby(['school_name'])[\n 'reading_score'].sum()\navg_reading = total_reading_score / total_student\npass_math_score = school_data_complete[school_data_complete['math_score'] >= 70\n ].groupby('school_name')['math_score'].count()\npass_math_percent = pass_math_score / total_student * 100\npass_reading_score = school_data_complete[school_data_complete[\n 'reading_score'] >= 70].groupby('school_name')['reading_score'].count()\npass_reading_percent = pass_reading_score / total_student * 100\noverall_reading_rate = (pass_math_percent + pass_reading_percent) / 2\nschool_summary_df = pd.DataFrame({'School Type': school_type,\n 'Total Students': total_student, 'Total School Budget': total_buget,\n 'Per Student Budget': per_stu_budget, 'Average Math Score': avg_math,\n 'Average Reading Score': avg_reading, '% Passing Math':\n pass_math_percent, '% Passing Reading': pass_reading_percent,\n '% Overall Passing Rate': overall_reading_rate})\nschool_summary_df\ntop_performing = school_summary_df.sort_values('% Overall Passing Rate',\n ascending=False)\ntop_performing.head()\ntop_performing = school_summary_df.sort_values('% Overall Passing Rate')\ntop_performing.head()\ngrade_math_score = pd.DataFrame()\ngrade_math_score['9th'] = school_data_complete[school_data_complete['grade'\n ] == '9th'].groupby('school_name')['math_score'].mean()\ngrade_math_score['10th'] = school_data_complete[school_data_complete[\n 'grade'] == '10th'].groupby('school_name')['math_score'].mean()\ngrade_math_score['11th'] = school_data_complete[school_data_complete[\n 'grade'] == '11th'].groupby('school_name')['math_score'].mean()\ngrade_math_score['12th'] = 
school_data_complete[school_data_complete[\n 'grade'] == '12th'].groupby('school_name')['math_score'].mean()\ngrade_math_score.index.name = ''\ngrade_math_score\ngrade_reading_score = pd.DataFrame()\ngrade_reading_score['9th'] = school_data_complete[school_data_complete[\n 'grade'] == '9th'].groupby('school_name')['reading_score'].mean()\ngrade_reading_score['10th'] = school_data_complete[school_data_complete[\n 'grade'] == '10th'].groupby('school_name')['reading_score'].mean()\ngrade_reading_score['11th'] = school_data_complete[school_data_complete[\n 'grade'] == '11th'].groupby('school_name')['reading_score'].mean()\ngrade_reading_score['12th'] = school_data_complete[school_data_complete[\n 'grade'] == '12th'].groupby('school_name')['reading_score'].mean()\ngrade_reading_score.index.name = ''\ngrade_reading_score\nspending_bins = [0, 585, 615, 645, 675]\ngroup_names = ['<$585', '$585-615', '$615-645', '$645-675']\nschool_spending_ranges = school_summary_df.loc[:, ['Average Math Score',\n 'Average Reading Score', '% Passing Math', '% Passing Reading',\n '% Overall Passing Rate']]\nschool_spending_ranges['Spending Ranges (Per Student)'] = pd.cut(\n school_summary_df['Per Student Budget'], spending_bins, labels=group_names)\nschool_spending_ranges = school_spending_ranges.groupby(\n 'Spending Ranges (Per Student)').mean()\nschool_spending_ranges\nsize_bins = [0, 1000, 2000, 5000]\ngroup_names = ['Small (<1000)', 'Medium (1000-2000)', 'Large (2000-5000)']\nschool_size_score = school_summary_df.loc[:, ['Average Math Score',\n 'Average Reading Score', '% Passing Math', '% Passing Reading',\n '% Overall Passing Rate']]\nschool_size_score['School Size'] = pd.cut(school_summary_df[\n 'Total Students'], size_bins, labels=group_names)\nschool_size_score = school_size_score.groupby('School Size').mean()\nschool_size_score\nscores_School_type = school_summary_df[['School Type', 'Average Math Score',\n 'Average Reading Score', '% Passing Math', '% Passing Reading',\n '% Overall Passing Rate']]\nscores_School_type = scores_School_type.groupby('School Type').mean()\nscores_School_type\n",
"step-4": "import pandas as pd\nimport numpy as np\nschool_data_to_load = 'Resources/schools_complete.csv'\nstudent_data_to_load = 'Resources/students_complete.csv'\nschool_data = pd.read_csv(school_data_to_load)\nstudent_data = pd.read_csv(student_data_to_load)\nschool_data_complete = pd.merge(student_data, school_data, how='left', on=[\n 'school_name', 'school_name'])\ntotal_schools = len(school_data)\ntotal_students = len(student_data)\ntotal_buget = school_data['budget'].sum()\navg_math_score = student_data['math_score'].mean()\navg_reading_score = student_data['reading_score'].mean()\noverall_avg_score = (avg_math_score + avg_reading_score) / 2\npasssing_math_score = (student_data['math_score'] >= 70).sum()\npercent_math_passing = passsing_math_score / len(student_data['math_score']\n ) * 100\npasssing_reading_score = (student_data['reading_score'] >= 70).sum()\npercent_reading_passing = passsing_reading_score / len(student_data[\n 'reading_score']) * 100\nDistrict_Summary_df = pd.DataFrame({'Total Schools': [total_schools],\n 'Total Students': [total_students], 'Total Budget': [total_buget],\n 'Average Math Score': [avg_math_score], 'Average Reading Score': [\n avg_reading_score], '% Passing Math': [percent_math_passing],\n '% Passing Reading': [percent_reading_passing],\n '% Overall Passing Rate': [overall_avg_score]})\nDistrict_Summary_df\nschool_groups = school_data_complete.set_index('school_name').groupby([\n 'school_name'])\nschool_type = school_data.set_index('school_name')['type']\ntotal_student = school_groups['Student ID'].count()\nschool_total_budget = school_data.set_index('school_name')['budget']\nper_stu_budget = school_total_budget / school_data.set_index('school_name')[\n 'size']\ntotal_math_score = school_data_complete.groupby(['school_name'])['math_score'\n ].sum()\navg_math = total_math_score / total_student\ntotal_reading_score = school_data_complete.groupby(['school_name'])[\n 'reading_score'].sum()\navg_reading = total_reading_score / total_student\npass_math_score = school_data_complete[school_data_complete['math_score'] >= 70\n ].groupby('school_name')['math_score'].count()\npass_math_percent = pass_math_score / total_student * 100\npass_reading_score = school_data_complete[school_data_complete[\n 'reading_score'] >= 70].groupby('school_name')['reading_score'].count()\npass_reading_percent = pass_reading_score / total_student * 100\noverall_reading_rate = (pass_math_percent + pass_reading_percent) / 2\nschool_summary_df = pd.DataFrame({'School Type': school_type,\n 'Total Students': total_student, 'Total School Budget': total_buget,\n 'Per Student Budget': per_stu_budget, 'Average Math Score': avg_math,\n 'Average Reading Score': avg_reading, '% Passing Math':\n pass_math_percent, '% Passing Reading': pass_reading_percent,\n '% Overall Passing Rate': overall_reading_rate})\nschool_summary_df\ntop_performing = school_summary_df.sort_values('% Overall Passing Rate',\n ascending=False)\ntop_performing.head()\ntop_performing = school_summary_df.sort_values('% Overall Passing Rate')\ntop_performing.head()\ngrade_math_score = pd.DataFrame()\ngrade_math_score['9th'] = school_data_complete[school_data_complete['grade'\n ] == '9th'].groupby('school_name')['math_score'].mean()\ngrade_math_score['10th'] = school_data_complete[school_data_complete[\n 'grade'] == '10th'].groupby('school_name')['math_score'].mean()\ngrade_math_score['11th'] = school_data_complete[school_data_complete[\n 'grade'] == 
'11th'].groupby('school_name')['math_score'].mean()\ngrade_math_score['12th'] = school_data_complete[school_data_complete[\n 'grade'] == '12th'].groupby('school_name')['math_score'].mean()\ngrade_math_score.index.name = ''\ngrade_math_score\ngrade_reading_score = pd.DataFrame()\ngrade_reading_score['9th'] = school_data_complete[school_data_complete[\n 'grade'] == '9th'].groupby('school_name')['reading_score'].mean()\ngrade_reading_score['10th'] = school_data_complete[school_data_complete[\n 'grade'] == '10th'].groupby('school_name')['reading_score'].mean()\ngrade_reading_score['11th'] = school_data_complete[school_data_complete[\n 'grade'] == '11th'].groupby('school_name')['reading_score'].mean()\ngrade_reading_score['12th'] = school_data_complete[school_data_complete[\n 'grade'] == '12th'].groupby('school_name')['reading_score'].mean()\ngrade_reading_score.index.name = ''\ngrade_reading_score\nspending_bins = [0, 585, 615, 645, 675]\ngroup_names = ['<$585', '$585-615', '$615-645', '$645-675']\nschool_spending_ranges = school_summary_df.loc[:, ['Average Math Score',\n 'Average Reading Score', '% Passing Math', '% Passing Reading',\n '% Overall Passing Rate']]\nschool_spending_ranges['Spending Ranges (Per Student)'] = pd.cut(\n school_summary_df['Per Student Budget'], spending_bins, labels=group_names)\nschool_spending_ranges = school_spending_ranges.groupby(\n 'Spending Ranges (Per Student)').mean()\nschool_spending_ranges\nsize_bins = [0, 1000, 2000, 5000]\ngroup_names = ['Small (<1000)', 'Medium (1000-2000)', 'Large (2000-5000)']\nschool_size_score = school_summary_df.loc[:, ['Average Math Score',\n 'Average Reading Score', '% Passing Math', '% Passing Reading',\n '% Overall Passing Rate']]\nschool_size_score['School Size'] = pd.cut(school_summary_df[\n 'Total Students'], size_bins, labels=group_names)\nschool_size_score = school_size_score.groupby('School Size').mean()\nschool_size_score\nscores_School_type = school_summary_df[['School Type', 'Average Math Score',\n 'Average Reading Score', '% Passing Math', '% Passing Reading',\n '% Overall Passing Rate']]\nscores_School_type = scores_School_type.groupby('School Type').mean()\nscores_School_type\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# # PyCity School Analysis\n# 1. Charter school types show better performace than District School types in all the scores. \n# 2. Overall students are performing better in english between (80 to 84%), than math (76 to 84%)\n\n# ### Note\n# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.\n\n# In[1]:\n\n\n# Dependencies and Setup\nimport pandas as pd\nimport numpy as np\n\n# File to Load (Remember to Change These)\nschool_data_to_load = \"Resources/schools_complete.csv\"\nstudent_data_to_load = \"Resources/students_complete.csv\"\n\n# Read School and Student Data File and store into Pandas Data Frames\nschool_data = pd.read_csv(school_data_to_load)\nstudent_data = pd.read_csv(student_data_to_load)\n\n# Combine the data into a single dataset\nschool_data_complete = pd.merge(student_data, school_data, how=\"left\", on=[\"school_name\", \"school_name\"])\n\n\n# ## District Summary\n# \n# * Calculate the total number of schools\n# \n# * Calculate the total number of students\n# \n# * Calculate the total budget\n# \n# * Calculate the average math score \n# \n# * Calculate the average reading score\n# \n# * Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2\n# \n# * Calculate the percentage of students with a passing math score (70 or greater)\n# \n# * Calculate the percentage of students with a passing reading score (70 or greater)\n# \n# * Create a dataframe to hold the above results\n# \n# * Optional: give the displayed data cleaner formatting\n\n# In[2]:\n\n\n#Calculate the total number of schools\ntotal_schools = len(school_data)\n#Calculate the total number of students\ntotal_students = len(student_data)\n#Calculate the total budget\ntotal_buget = school_data['budget'].sum()\n#Calculate the average math score\navg_math_score = student_data['math_score'].mean()\n#Calculate the average reading score\navg_reading_score = student_data['reading_score'].mean()\n#Calculate the overall passing rate (overall average score)\noverall_avg_score = ((avg_math_score + avg_reading_score)/2)\n#Calculate the percentage of students with a passing math score (70 or greater)\npasssing_math_score = (student_data['math_score'] >= 70).sum()\npercent_math_passing = (passsing_math_score/len(student_data['math_score']))*100\n#Calculate the percentage of students with a passing reading score (70 or greater)\npasssing_reading_score = (student_data['reading_score'] >= 70).sum()\npercent_reading_passing = (passsing_reading_score/len(student_data['reading_score']))*100\n\n#Create a dataframe to hold the above results\nDistrict_Summary_df = pd.DataFrame({'Total Schools' : [total_schools], 'Total Students' : [total_students], 'Total Budget' :[total_buget], 'Average Math Score' : [avg_math_score], 'Average Reading Score':[avg_reading_score], '% Passing Math' : [percent_math_passing], '% Passing Reading' : [percent_reading_passing], '% Overall Passing Rate' : [overall_avg_score]})\n\nDistrict_Summary_df\n\n\n# ## School Summary\n\n# * Create an overview table that summarizes key metrics about each school, including:\n# * School Name\n# * School Type\n# * Total Students\n# * Total School Budget\n# * Per Student Budget\n# * Average Math Score\n# * Average Reading Score\n# * % Passing Math\n# * % Passing Reading\n# * Overall Passing Rate (Average of the above two)\n# \n# * Create a dataframe to hold the above results\n\n# 
## Top Performing Schools (By Passing Rate)\n\n# * Sort and display the top five schools in overall passing rate\n\n# In[3]:\n\n\n#group by School Name\nschool_groups = school_data_complete.set_index('school_name').groupby(['school_name'])\n#find School type\nschool_type = school_data.set_index('school_name')['type']\n#Calculate total students in each school\ntotal_student = school_groups['Student ID'].count()\n#Calculate total budget in each school\nschool_total_budget = school_data.set_index('school_name')['budget']\n#Calculate budget per student in each school\nper_stu_budget = school_total_budget/school_data.set_index('school_name')['size']\n#Calculate average math score\ntotal_math_score = school_data_complete.groupby(['school_name'])['math_score'].sum()\navg_math = total_math_score/total_student\n#Calculate average reading score\ntotal_reading_score = school_data_complete.groupby(['school_name'])['reading_score'].sum()\navg_reading = total_reading_score/total_student\n#Calculate math score >= 70\npass_math_score = school_data_complete[school_data_complete['math_score'] >= 70].groupby('school_name')['math_score'].count()\npass_math_percent = (pass_math_score/total_student)*100\n##Calculate reading score >= 70\npass_reading_score = school_data_complete[school_data_complete['reading_score'] >= 70].groupby('school_name')['reading_score'].count()\npass_reading_percent = (pass_reading_score/total_student)*100\n#Calculate overall passing rate\noverall_reading_rate = (pass_math_percent + pass_reading_percent)/2\n\n#Adding all the calculated columns in dataframe\nschool_summary_df = pd.DataFrame({'School Type' : school_type, 'Total Students' : total_student, 'Total School Budget' : total_buget, 'Per Student Budget' : per_stu_budget, 'Average Math Score' : avg_math, 'Average Reading Score' : avg_reading, '% Passing Math' : pass_math_percent, '% Passing Reading' : pass_reading_percent, '% Overall Passing Rate' : overall_reading_rate})\nschool_summary_df\n\n#Sort and display the top five schools in overall passing rate\ntop_performing = school_summary_df.sort_values('% Overall Passing Rate', ascending = False)\ntop_performing.head()\n\n\n# ## Bottom Performing Schools (By Passing Rate)\n\n# * Sort and display the five worst-performing schools\n\n# In[4]:\n\n\n#Sort and display the five worst-performing schools\ntop_performing = school_summary_df.sort_values('% Overall Passing Rate')\ntop_performing.head()\n\n\n# ## Math Scores by Grade\n\n# * Create a table that lists the average Reading Score for students of each grade level (9th, 10th, 11th, 12th) at each school.\n# \n# * Create a pandas series for each grade. 
Hint: use a conditional statement.\n# \n# * Group each series by school\n# \n# * Combine the series into a dataframe\n# \n# * Optional: give the displayed data cleaner formatting\n\n# In[5]:\n\n\n#Create dataframe to hold average math score\ngrade_math_score = pd.DataFrame()\n#Calclulate average math score for 9th\ngrade_math_score['9th'] = school_data_complete[school_data_complete['grade'] == '9th'].groupby('school_name')['math_score'].mean()\n#Calclulate average math score for 10th\ngrade_math_score['10th'] = school_data_complete[school_data_complete['grade'] == '10th'].groupby('school_name')['math_score'].mean()\n#Calclulate average math score for 11th\ngrade_math_score['11th'] = school_data_complete[school_data_complete['grade'] == '11th'].groupby('school_name')['math_score'].mean()\n#Calclulate average math score for 12th\ngrade_math_score['12th'] = school_data_complete[school_data_complete['grade'] == '12th'].groupby('school_name')['math_score'].mean()\n\n#formatting by setting index name blank\ngrade_math_score.index.name = ''\ngrade_math_score\n\n\n# ## Reading Score by Grade \n\n# * Perform the same operations as above for reading scores\n\n# In[6]:\n\n\n#Create dataframe to hold average reading score\ngrade_reading_score = pd.DataFrame()\n#Calclulate average reading score for 9th\ngrade_reading_score['9th'] = school_data_complete[school_data_complete['grade'] == '9th'].groupby('school_name')['reading_score'].mean()\n#Calclulate average reading score for 10th\ngrade_reading_score['10th'] = school_data_complete[school_data_complete['grade'] == '10th'].groupby('school_name')['reading_score'].mean()\n#Calclulate average reading score for 11th\ngrade_reading_score['11th'] = school_data_complete[school_data_complete['grade'] == '11th'].groupby('school_name')['reading_score'].mean()\n#Calclulate average reading score for 12th\ngrade_reading_score['12th'] = school_data_complete[school_data_complete['grade'] == '12th'].groupby('school_name')['reading_score'].mean()\n\n#formatting by setting index name blank\ngrade_reading_score.index.name = ''\ngrade_reading_score\n\n\n# ## Scores by School Spending\n\n# * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:\n# * Average Math Score\n# * Average Reading Score\n# * % Passing Math\n# * % Passing Reading\n# * Overall Passing Rate (Average of the above two)\n\n# In[7]:\n\n\n# Sample bins. Feel free to create your own bins.\nspending_bins = [0, 585, 615, 645, 675]\ngroup_names = [\"<$585\", \"$585-615\", \"$615-645\", \"$645-675\"]\n\n\n# In[8]:\n\n\n# create dataframe with needed columns\nschool_spending_ranges = school_summary_df.loc[:, ['Average Math Score',\n 'Average Reading Score','% Passing Math',\n '% Passing Reading','% Overall Passing Rate']]\n\n#Calculate average score based on spending_bins \nschool_spending_ranges['Spending Ranges (Per Student)'] = pd.cut(school_summary_df['Per Student Budget'], spending_bins, labels = group_names)\nschool_spending_ranges = school_spending_ranges.groupby('Spending Ranges (Per Student)').mean()\nschool_spending_ranges\n\n\n# ## Scores by School Size\n\n# * Perform the same operations as above, based on school size.\n\n# In[9]:\n\n\n# Sample bins. 
Feel free to create your own bins.\nsize_bins = [0, 1000, 2000, 5000]\ngroup_names = [\"Small (<1000)\", \"Medium (1000-2000)\", \"Large (2000-5000)\"]\n\n\n# In[10]:\n\n\n# create dataframe with needed columns\nschool_size_score = school_summary_df.loc[:, ['Average Math Score',\n 'Average Reading Score','% Passing Math',\n '% Passing Reading','% Overall Passing Rate']]\n\n#Calculate average score as per size_bins\nschool_size_score['School Size'] = pd.cut(school_summary_df['Total Students'], size_bins, labels = group_names)\nschool_size_score = school_size_score.groupby('School Size').mean()\nschool_size_score\n\n\n# ## Scores by School Type\n\n# * Perform the same operations as above, based on school type.\n\n# In[11]:\n\n\n# create dataframe with needed columns\nscores_School_type = school_summary_df[['School Type','Average Math Score',\n 'Average Reading Score','% Passing Math',\n '% Passing Reading','% Overall Passing Rate',]]\n#create a group based on school type\nscores_School_type = scores_School_type.groupby('School Type').mean()\nscores_School_type\n\n\n# In[ ]:\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
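
The "Scores by School Spending" step in the notebook record above relies on pandas' pd.cut to bucket per-student budgets before averaging; a minimal sketch of that pattern is given below. The toy DataFrame is invented, and only the bin edges mirror the sample bins in the record.

```python
# Minimal sketch of the pd.cut + groupby pattern used in the notebook record.
# The data is made up; only the technique (and the sample bin edges) mirror it.
import pandas as pd

summary = pd.DataFrame({
    "Per Student Budget": [580, 600, 630, 660],
    "Average Math Score": [83.1, 81.4, 78.9, 77.0],
})

spending_bins = [0, 585, 615, 645, 675]
labels = ["<$585", "$585-615", "$615-645", "$645-675"]

# Bucket each school by its per-student spending, then average within buckets.
summary["Spending Ranges (Per Student)"] = pd.cut(
    summary["Per Student Budget"], spending_bins, labels=labels)
print(summary.groupby("Spending Ranges (Per Student)").mean())
```
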
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Event(models.Model):
name = models.CharField('Назва', max_length=200)
date = models.DateField('Дата')
address = models.CharField('Адреса', max_length=255, blank=True, null=True)
attendents = models.ManyToManyField(User, through='Atendent', blank=True, null=True)
description = models.TextField('Опис', blank=True, null=True)
def __unicode__(self):
return self.name
class Atendent(models.Model):
user = models.ForeignKey(User)
event = models.ForeignKey(Event, null=True, blank=True)
state = models.IntegerField(null=True, blank=True)
|
normal
|
{
"blob_id": "137f9310256f66ccd9fbe6626659c3c4daea0efc",
"index": 8949,
"step-1": "<mask token>\n\n\nclass Atendent(models.Model):\n user = models.ForeignKey(User)\n event = models.ForeignKey(Event, null=True, blank=True)\n state = models.IntegerField(null=True, blank=True)\n",
"step-2": "<mask token>\n\n\nclass Event(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return self.name\n\n\nclass Atendent(models.Model):\n user = models.ForeignKey(User)\n event = models.ForeignKey(Event, null=True, blank=True)\n state = models.IntegerField(null=True, blank=True)\n",
"step-3": "<mask token>\n\n\nclass Event(models.Model):\n name = models.CharField('Назва', max_length=200)\n date = models.DateField('Дата')\n address = models.CharField('Адреса', max_length=255, blank=True, null=True)\n attendents = models.ManyToManyField(User, through='Atendent', blank=\n True, null=True)\n description = models.TextField('Опис', blank=True, null=True)\n\n def __unicode__(self):\n return self.name\n\n\nclass Atendent(models.Model):\n user = models.ForeignKey(User)\n event = models.ForeignKey(Event, null=True, blank=True)\n state = models.IntegerField(null=True, blank=True)\n",
"step-4": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Event(models.Model):\n name = models.CharField('Назва', max_length=200)\n date = models.DateField('Дата')\n address = models.CharField('Адреса', max_length=255, blank=True, null=True)\n attendents = models.ManyToManyField(User, through='Atendent', blank=\n True, null=True)\n description = models.TextField('Опис', blank=True, null=True)\n\n def __unicode__(self):\n return self.name\n\n\nclass Atendent(models.Model):\n user = models.ForeignKey(User)\n event = models.ForeignKey(Event, null=True, blank=True)\n state = models.IntegerField(null=True, blank=True)\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom django.db import models\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nclass Event(models.Model):\n name = models.CharField('Назва', max_length=200)\n date = models.DateField('Дата')\n address = models.CharField('Адреса', max_length=255, blank=True, null=True)\n attendents = models.ManyToManyField(User, through='Atendent', blank=True, null=True)\n description = models.TextField('Опис', blank=True, null=True)\n \n def __unicode__(self):\n return self.name\n\nclass Atendent(models.Model):\n user = models.ForeignKey(User)\n event = models.ForeignKey(Event, null=True, blank=True)\n state = models.IntegerField(null=True, blank=True)",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
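
A brief usage sketch for the Event/Atendent models in the record above, assuming the app is installed and migrated; the "events" module path, the username, and the state value are invented for illustration.

```python
# Hypothetical usage of the Event/Atendent models above (e.g. in a Django shell).
# Assumes the app is installed and migrated; the "events" module path,
# username and state value are made up.
import datetime
from django.contrib.auth.models import User
from events.models import Event, Atendent

user = User.objects.create_user(username="olena")
event = Event.objects.create(name="PyCon UA", date=datetime.date(2015, 10, 24))

# attendents is declared with through='Atendent', so membership rows are
# created on the intermediate model rather than via event.attendents.add(user)
# (direct .add() is rejected for through M2M fields in old Django versions).
Atendent.objects.create(user=user, event=event, state=1)

print(list(event.attendents.all()))  # [<User: olena>]
```
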
<|reserved_special_token_0|>
class ScoreLoop:
def __init__(self):
self.scores = fetch_scores()
self.sprites = pygame.sprite.Group()
self.get_score_sprites()
self.space_cooldown = True
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScoreLoop:
def __init__(self):
self.scores = fetch_scores()
self.sprites = pygame.sprite.Group()
self.get_score_sprites()
self.space_cooldown = True
def get_score_sprites(self):
rank = 1
for score in self.scores:
self.sprites.add(TextSprite(str(score), 256, 100 + 50 * rank, True)
)
rank += 1
def increment(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
if self.space_cooldown:
return None
return 'startloop'
self.space_cooldown = False
return None
def get_sprites(self):
"""retruns sprites for the UI"""
return self.sprites
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScoreLoop:
def __init__(self):
self.scores = fetch_scores()
self.sprites = pygame.sprite.Group()
self.get_score_sprites()
self.space_cooldown = True
def get_score_sprites(self):
rank = 1
for score in self.scores:
self.sprites.add(TextSprite(str(score), 256, 100 + 50 * rank, True)
)
rank += 1
def increment(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
if self.space_cooldown:
return None
return 'startloop'
self.space_cooldown = False
return None
def get_sprites(self):
"""retruns sprites for the UI"""
return self.sprites
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pygame
from score_fetcher import fetch_scores
from entities.sprite_text import TextSprite
class ScoreLoop:
def __init__(self):
self.scores = fetch_scores()
self.sprites = pygame.sprite.Group()
self.get_score_sprites()
self.space_cooldown = True
def get_score_sprites(self):
rank = 1
for score in self.scores:
self.sprites.add(TextSprite(str(score), 256, 100 + 50 * rank, True)
)
rank += 1
def increment(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
if self.space_cooldown:
return None
return 'startloop'
self.space_cooldown = False
return None
def get_sprites(self):
"""retruns sprites for the UI"""
return self.sprites
if __name__ == '__main__':
pass
<|reserved_special_token_1|>
"""
This file contains the ScoreLoop which is used to show
the user at most the 10 highest scores made by the player
"""
import pygame
from score_fetcher import fetch_scores
from entities.sprite_text import TextSprite
class ScoreLoop:
def __init__(self):
self.scores = fetch_scores()
self.sprites = pygame.sprite.Group()
self.get_score_sprites()
self.space_cooldown = True
def get_score_sprites(self):
rank = 1
for score in self.scores:
self.sprites.add(
TextSprite(str(score), 256, 100+50*rank, True)
)
rank += 1
def increment(self):
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
if self.space_cooldown:
return None
return "startloop"
self.space_cooldown = False
return None
def get_sprites(self):
"""retruns sprites for the UI"""
return self.sprites
if __name__ == "__main__":
pass
|
flexible
|
{
"blob_id": "047b3398a73c9e7d75d43eeeab85f52c05ff90c3",
"index": 4534,
"step-1": "<mask token>\n\n\nclass ScoreLoop:\n\n def __init__(self):\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n self.space_cooldown = True\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ScoreLoop:\n\n def __init__(self):\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n self.space_cooldown = True\n\n def get_score_sprites(self):\n rank = 1\n for score in self.scores:\n self.sprites.add(TextSprite(str(score), 256, 100 + 50 * rank, True)\n )\n rank += 1\n\n def increment(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE]:\n if self.space_cooldown:\n return None\n return 'startloop'\n self.space_cooldown = False\n return None\n\n def get_sprites(self):\n \"\"\"retruns sprites for the UI\"\"\"\n return self.sprites\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ScoreLoop:\n\n def __init__(self):\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n self.space_cooldown = True\n\n def get_score_sprites(self):\n rank = 1\n for score in self.scores:\n self.sprites.add(TextSprite(str(score), 256, 100 + 50 * rank, True)\n )\n rank += 1\n\n def increment(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE]:\n if self.space_cooldown:\n return None\n return 'startloop'\n self.space_cooldown = False\n return None\n\n def get_sprites(self):\n \"\"\"retruns sprites for the UI\"\"\"\n return self.sprites\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "<mask token>\nimport pygame\nfrom score_fetcher import fetch_scores\nfrom entities.sprite_text import TextSprite\n\n\nclass ScoreLoop:\n\n def __init__(self):\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n self.space_cooldown = True\n\n def get_score_sprites(self):\n rank = 1\n for score in self.scores:\n self.sprites.add(TextSprite(str(score), 256, 100 + 50 * rank, True)\n )\n rank += 1\n\n def increment(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE]:\n if self.space_cooldown:\n return None\n return 'startloop'\n self.space_cooldown = False\n return None\n\n def get_sprites(self):\n \"\"\"retruns sprites for the UI\"\"\"\n return self.sprites\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "\"\"\"\nThis file contains the ScoreLoop which is used to show\nthe user thw at most 10 highest scores made by the player\n\"\"\"\nimport pygame\nfrom score_fetcher import fetch_scores\nfrom entities.sprite_text import TextSprite\n\n\nclass ScoreLoop:\n\n def __init__(self):\n\n self.scores = fetch_scores()\n self.sprites = pygame.sprite.Group()\n self.get_score_sprites()\n\n self.space_cooldown = True\n\n def get_score_sprites(self):\n\n rank = 1\n\n for score in self.scores:\n self.sprites.add(\n TextSprite(str(score), 256, 100+50*rank, True)\n )\n rank += 1\n\n def increment(self):\n\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_SPACE]:\n if self.space_cooldown:\n return None\n return \"startloop\"\n self.space_cooldown = False\n return None\n\n def get_sprites(self):\n \"\"\"retruns sprites for the UI\"\"\"\n return self.sprites\n\n\nif __name__ == \"__main__\":\n\n pass\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
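
A rough driver loop for the ScoreLoop record above; it is not part of the record. It assumes the project's own modules (score_fetcher, entities.sprite_text) are importable, that TextSprite carries the image/rect attributes pygame's Group.draw needs, and that the class lives in a module named score_loop; the window size and frame rate are likewise arbitrary.

```python
# Hypothetical game loop driving the ScoreLoop above; not part of the record.
# Assumes the project's own modules (score_fetcher, entities.sprite_text) are
# importable and that the class lives in a module named score_loop.
import pygame
from score_loop import ScoreLoop

pygame.init()
screen = pygame.display.set_mode((512, 512))
clock = pygame.time.Clock()

score_view = ScoreLoop()
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
    # increment() keeps returning None until space is pressed after the
    # initial cooldown frame; "startloop" signals a switch back to the game.
    if score_view.increment() == "startloop":
        running = False  # a real game would hand control to its start loop here
    screen.fill((0, 0, 0))
    score_view.get_sprites().draw(screen)
    pygame.display.flip()
    clock.tick(60)

pygame.quit()
```
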
<|reserved_special_token_0|>
def train(epoch, model, dataloader, criterion, optimizer, scheduler):
global global_train_step
model.train()
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0),
desc='train', total=len(opensub_dataset) // opt.realbatch):
tgt_input = tgt[:, :-1]
tgt_gold = tgt[:, 1:]
tgt_lens = tgt_lens - 1
decoder_output_probs, _ = model(src=src, tgt=tgt_input, src_lengths
=src_lens, tgt_lengths=tgt_lens)
decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)
out_seqs = torch.argmax(decoder_output_probs, dim=2)
loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION
loss.backward()
total_loss += loss.item()
bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)
distinct_1_score += distinct_1(out_seqs, tgt_lens)
distinct_2_score += distinct_2(out_seqs, tgt_lens)
global_train_step += 1
writer.log_loss(loss.item() * ACCUMULATION, mode='train')
if (i + 1) % ACCUMULATION == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if (i + 1) % opt.logstep == 0:
avg_loss = total_loss / opt.logstep * ACCUMULATION
avg_bleu = bleu_score / opt.logstep
avg_distinct_1 = distinct_1_score / opt.logstep
avg_distinct_2 = distinct_2_score / opt.logstep
mylogger.log(i, epoch, model, value=avg_loss, is_train=True,
info=
f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'
)
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2],
vocab_bulider, global_train_step, mode='train')
<|reserved_special_token_0|>
def run_model(model, train_loader, eval_loader, niter, criterion, optimizer,
scheduler):
mylogger.log_info('Running Model')
for i in range(niter):
mylogger.log_info(
f"EPOCH: {i}, lr: {optimizer.state_dict()['param_groups'][0]['lr']}"
)
train(i, model, train_loader, criterion, optimizer, scheduler)
eval(i, model, eval_loader, criterion, beam_size=opt.beam)
<|reserved_special_token_0|>
def show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth,
vocab_bulider, step, mode='train'):
for in_id, out_id, out_len, gold_id in zip(batch_in_seqs,
batch_out_seqs, batch_out_lens, groud_truth):
in_seq = convert_ids_to_seq(in_id, vocab_bulider)
out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else
out_id, vocab_bulider)
gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)
writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:
get_index(in_seq, '<pad>')]), global_step=step)
writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq),
global_step=step)
writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:
get_index(in_seq, '<pad>')]), global_step=step)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train(epoch, model, dataloader, criterion, optimizer, scheduler):
global global_train_step
model.train()
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0),
desc='train', total=len(opensub_dataset) // opt.realbatch):
tgt_input = tgt[:, :-1]
tgt_gold = tgt[:, 1:]
tgt_lens = tgt_lens - 1
decoder_output_probs, _ = model(src=src, tgt=tgt_input, src_lengths
=src_lens, tgt_lengths=tgt_lens)
decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)
out_seqs = torch.argmax(decoder_output_probs, dim=2)
loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION
loss.backward()
total_loss += loss.item()
bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)
distinct_1_score += distinct_1(out_seqs, tgt_lens)
distinct_2_score += distinct_2(out_seqs, tgt_lens)
global_train_step += 1
writer.log_loss(loss.item() * ACCUMULATION, mode='train')
if (i + 1) % ACCUMULATION == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if (i + 1) % opt.logstep == 0:
avg_loss = total_loss / opt.logstep * ACCUMULATION
avg_bleu = bleu_score / opt.logstep
avg_distinct_1 = distinct_1_score / opt.logstep
avg_distinct_2 = distinct_2_score / opt.logstep
mylogger.log(i, epoch, model, value=avg_loss, is_train=True,
info=
f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'
)
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2],
vocab_bulider, global_train_step, mode='train')
def eval(epoch, model, dataloader, criterion, beam_size=2):
global global_valid_step
model.eval()
criterion.eval()
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
fout = open(os.path.join('./save/' + model_name + '/', model_name + '_' +
str(epoch)), 'w', encoding='utf-8')
with torch.no_grad():
for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader,
0), desc='eval', total=len(imsdb_dataset)):
tgt_begin = torch.LongTensor([[vocab_bulider['<bos>']]]).to(device)
tgt_gold = tgt[:, 1:]
if beam_size > 1:
output_seqs, output_probs = model.beam_search(src=src,
tgt_begin=tgt_begin, src_length=src_lens, eos_token_id=
vocab_bulider['<eos>'], beam_size=beam_size, max_length
=tgt_lens.item())
else:
output_seqs, output_probs = model.greedy(src=src, tgt_begin
=tgt_begin, src_length=src_lens, eos_token_id=
vocab_bulider['<eos>'], max_length=tgt_lens.item())
min_len = min(tgt_gold.shape[1], output_seqs.shape[1])
loss = criterion(output_probs[:, :min_len, :].permute(0, 2, 1),
tgt_gold[:, :min_len])
total_loss += loss.item()
out_lens = [min_len]
bleu_score += bleu_metirc(tgt_gold, output_seqs, out_lens)
distinct_1_score += distinct_1(output_seqs, out_lens)
distinct_2_score += distinct_2(output_seqs, out_lens)
global_valid_step += 1
fout.write(' '.join(convert_ids_to_seq(output_seqs[0],
vocab_bulider)) + '\n')
if (i + 1) % opt.logstep == 0:
show_gen_seq(src, output_seqs, out_lens, tgt_gold,
vocab_bulider, global_valid_step, mode='valid')
avg_loss = total_loss / i
avg_bleu = bleu_score / i
avg_distinct_1 = distinct_1_score / i
avg_distinct_2 = distinct_2_score / i
writer.log_loss(avg_loss, mode='valid')
mylogger.log(i, epoch, model, value=avg_bleu, is_train=False, info=
f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'
)
fout.close()
def run_model(model, train_loader, eval_loader, niter, criterion, optimizer,
scheduler):
mylogger.log_info('Running Model')
for i in range(niter):
mylogger.log_info(
f"EPOCH: {i}, lr: {optimizer.state_dict()['param_groups'][0]['lr']}"
)
train(i, model, train_loader, criterion, optimizer, scheduler)
eval(i, model, eval_loader, criterion, beam_size=opt.beam)
<|reserved_special_token_0|>
def show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth,
vocab_bulider, step, mode='train'):
for in_id, out_id, out_len, gold_id in zip(batch_in_seqs,
batch_out_seqs, batch_out_lens, groud_truth):
in_seq = convert_ids_to_seq(in_id, vocab_bulider)
out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else
out_id, vocab_bulider)
gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)
writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:
get_index(in_seq, '<pad>')]), global_step=step)
writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq),
global_step=step)
writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:
get_index(in_seq, '<pad>')]), global_step=step)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train(epoch, model, dataloader, criterion, optimizer, scheduler):
global global_train_step
model.train()
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0),
desc='train', total=len(opensub_dataset) // opt.realbatch):
tgt_input = tgt[:, :-1]
tgt_gold = tgt[:, 1:]
tgt_lens = tgt_lens - 1
decoder_output_probs, _ = model(src=src, tgt=tgt_input, src_lengths
=src_lens, tgt_lengths=tgt_lens)
decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)
out_seqs = torch.argmax(decoder_output_probs, dim=2)
loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION
loss.backward()
total_loss += loss.item()
bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)
distinct_1_score += distinct_1(out_seqs, tgt_lens)
distinct_2_score += distinct_2(out_seqs, tgt_lens)
global_train_step += 1
writer.log_loss(loss.item() * ACCUMULATION, mode='train')
if (i + 1) % ACCUMULATION == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if (i + 1) % opt.logstep == 0:
avg_loss = total_loss / opt.logstep * ACCUMULATION
avg_bleu = bleu_score / opt.logstep
avg_distinct_1 = distinct_1_score / opt.logstep
avg_distinct_2 = distinct_2_score / opt.logstep
mylogger.log(i, epoch, model, value=avg_loss, is_train=True,
info=
f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'
)
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2],
vocab_bulider, global_train_step, mode='train')
def eval(epoch, model, dataloader, criterion, beam_size=2):
global global_valid_step
model.eval()
criterion.eval()
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
fout = open(os.path.join('./save/' + model_name + '/', model_name + '_' +
str(epoch)), 'w', encoding='utf-8')
with torch.no_grad():
for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader,
0), desc='eval', total=len(imsdb_dataset)):
tgt_begin = torch.LongTensor([[vocab_bulider['<bos>']]]).to(device)
tgt_gold = tgt[:, 1:]
if beam_size > 1:
output_seqs, output_probs = model.beam_search(src=src,
tgt_begin=tgt_begin, src_length=src_lens, eos_token_id=
vocab_bulider['<eos>'], beam_size=beam_size, max_length
=tgt_lens.item())
else:
output_seqs, output_probs = model.greedy(src=src, tgt_begin
=tgt_begin, src_length=src_lens, eos_token_id=
vocab_bulider['<eos>'], max_length=tgt_lens.item())
min_len = min(tgt_gold.shape[1], output_seqs.shape[1])
loss = criterion(output_probs[:, :min_len, :].permute(0, 2, 1),
tgt_gold[:, :min_len])
total_loss += loss.item()
out_lens = [min_len]
bleu_score += bleu_metirc(tgt_gold, output_seqs, out_lens)
distinct_1_score += distinct_1(output_seqs, out_lens)
distinct_2_score += distinct_2(output_seqs, out_lens)
global_valid_step += 1
fout.write(' '.join(convert_ids_to_seq(output_seqs[0],
vocab_bulider)) + '\n')
if (i + 1) % opt.logstep == 0:
show_gen_seq(src, output_seqs, out_lens, tgt_gold,
vocab_bulider, global_valid_step, mode='valid')
avg_loss = total_loss / i
avg_bleu = bleu_score / i
avg_distinct_1 = distinct_1_score / i
avg_distinct_2 = distinct_2_score / i
writer.log_loss(avg_loss, mode='valid')
mylogger.log(i, epoch, model, value=avg_bleu, is_train=False, info=
f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'
)
fout.close()
def run_model(model, train_loader, eval_loader, niter, criterion, optimizer,
scheduler):
mylogger.log_info('Running Model')
for i in range(niter):
mylogger.log_info(
f"EPOCH: {i}, lr: {optimizer.state_dict()['param_groups'][0]['lr']}"
)
train(i, model, train_loader, criterion, optimizer, scheduler)
eval(i, model, eval_loader, criterion, beam_size=opt.beam)
def convert_ids_to_seq(id_seq, vocab_bulider):
return [vocab_bulider.id_to_word(idx) for idx in id_seq]
def show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth,
vocab_bulider, step, mode='train'):
for in_id, out_id, out_len, gold_id in zip(batch_in_seqs,
batch_out_seqs, batch_out_lens, groud_truth):
in_seq = convert_ids_to_seq(in_id, vocab_bulider)
out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else
out_id, vocab_bulider)
gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)
writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:
get_index(in_seq, '<pad>')]), global_step=step)
writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq),
global_step=step)
writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:
get_index(in_seq, '<pad>')]), global_step=step)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train(epoch, model, dataloader, criterion, optimizer, scheduler):
global global_train_step
model.train()
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0),
desc='train', total=len(opensub_dataset) // opt.realbatch):
tgt_input = tgt[:, :-1]
tgt_gold = tgt[:, 1:]
tgt_lens = tgt_lens - 1
decoder_output_probs, _ = model(src=src, tgt=tgt_input, src_lengths
=src_lens, tgt_lengths=tgt_lens)
decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)
out_seqs = torch.argmax(decoder_output_probs, dim=2)
loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION
loss.backward()
total_loss += loss.item()
bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)
distinct_1_score += distinct_1(out_seqs, tgt_lens)
distinct_2_score += distinct_2(out_seqs, tgt_lens)
global_train_step += 1
writer.log_loss(loss.item() * ACCUMULATION, mode='train')
if (i + 1) % ACCUMULATION == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if (i + 1) % opt.logstep == 0:
avg_loss = total_loss / opt.logstep * ACCUMULATION
avg_bleu = bleu_score / opt.logstep
avg_distinct_1 = distinct_1_score / opt.logstep
avg_distinct_2 = distinct_2_score / opt.logstep
mylogger.log(i, epoch, model, value=avg_loss, is_train=True,
info=
f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'
)
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2],
vocab_bulider, global_train_step, mode='train')
def eval(epoch, model, dataloader, criterion, beam_size=2):
global global_valid_step
model.eval()
criterion.eval()
total_loss = 0.0
bleu_score = 0.0
distinct_1_score, distinct_2_score = 0.0, 0.0
fout = open(os.path.join('./save/' + model_name + '/', model_name + '_' +
str(epoch)), 'w', encoding='utf-8')
with torch.no_grad():
for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader,
0), desc='eval', total=len(imsdb_dataset)):
tgt_begin = torch.LongTensor([[vocab_bulider['<bos>']]]).to(device)
tgt_gold = tgt[:, 1:]
if beam_size > 1:
output_seqs, output_probs = model.beam_search(src=src,
tgt_begin=tgt_begin, src_length=src_lens, eos_token_id=
vocab_bulider['<eos>'], beam_size=beam_size, max_length
=tgt_lens.item())
else:
output_seqs, output_probs = model.greedy(src=src, tgt_begin
=tgt_begin, src_length=src_lens, eos_token_id=
vocab_bulider['<eos>'], max_length=tgt_lens.item())
min_len = min(tgt_gold.shape[1], output_seqs.shape[1])
loss = criterion(output_probs[:, :min_len, :].permute(0, 2, 1),
tgt_gold[:, :min_len])
total_loss += loss.item()
out_lens = [min_len]
bleu_score += bleu_metirc(tgt_gold, output_seqs, out_lens)
distinct_1_score += distinct_1(output_seqs, out_lens)
distinct_2_score += distinct_2(output_seqs, out_lens)
global_valid_step += 1
fout.write(' '.join(convert_ids_to_seq(output_seqs[0],
vocab_bulider)) + '\n')
if (i + 1) % opt.logstep == 0:
show_gen_seq(src, output_seqs, out_lens, tgt_gold,
vocab_bulider, global_valid_step, mode='valid')
avg_loss = total_loss / i
avg_bleu = bleu_score / i
avg_distinct_1 = distinct_1_score / i
avg_distinct_2 = distinct_2_score / i
writer.log_loss(avg_loss, mode='valid')
mylogger.log(i, epoch, model, value=avg_bleu, is_train=False, info=
f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'
)
fout.close()
def run_model(model, train_loader, eval_loader, niter, criterion, optimizer,
scheduler):
mylogger.log_info('Running Model')
for i in range(niter):
mylogger.log_info(
f"EPOCH: {i}, lr: {optimizer.state_dict()['param_groups'][0]['lr']}"
)
train(i, model, train_loader, criterion, optimizer, scheduler)
eval(i, model, eval_loader, criterion, beam_size=opt.beam)
def convert_ids_to_seq(id_seq, vocab_bulider):
return [vocab_bulider.id_to_word(idx) for idx in id_seq]
def show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth,
vocab_bulider, step, mode='train'):
for in_id, out_id, out_len, gold_id in zip(batch_in_seqs,
batch_out_seqs, batch_out_lens, groud_truth):
in_seq = convert_ids_to_seq(in_id, vocab_bulider)
out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else
out_id, vocab_bulider)
gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)
writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:
get_index(in_seq, '<pad>')]), global_step=step)
writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq),
global_step=step)
writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:
get_index(in_seq, '<pad>')]), global_step=step)
if __name__ == '__main__':
begin_time = time.strftime('%H%M%S', time.localtime())
model_name = 'transformer' + begin_time
opt = parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
torch.cuda.set_device(opt.gpuid)
init_seed(opt.manualSeed)
ACCUMULATION = opt.batchsize // opt.realbatch
mylogger = LogManager(checkpoint_step=10, save_dir='./save', model_name
=model_name, log_file_name=model_name + '.log', mode='max', device=
device)
mylogger.save_args(opt)
writer = SummaryHelper(save_dir='./save', model_name=model_name)
train_data_dir = './data/opensubtitles'
vocab_file_list = ['dialogue_length3_6.post']
vocab_bulider = VocabBulider(train_data_dir, src_files=vocab_file_list,
ignore_unk_error=True, vocab_file='vocab.txt', min_count=opt.
mincount, update=opt.update)
print('most common 50:', vocab_bulider.most_common(50))
mylogger.log_info('vocab size: %d' % len(vocab_bulider))
bleu_metirc = BLEUMetric(vocab_bulider.id2vocab, ignore_smoothing_error
=True)
distinct_1 = DistinctNGram(ngram=1)
distinct_2 = DistinctNGram(ngram=2)
if opt.cotk:
opensub_file_name_list = ['opensub_pair_dev', 'opensub_pair_test',
'opensub_pair_train']
unk_token = None
else:
opensub_file_name_list = ['dialogue_length3_6']
unk_token = 'UNknown'
opensub_dataset = OpenSubDataset(data_dir=train_data_dir, vocab_bulider
=vocab_bulider, file_name_list=opensub_file_name_list, unk_token=
'UNknown', save_process=False, samples=opt.trainsamples, add_bos=
True, add_eos=True)
print(opensub_dataset.sample())
opensub_dataloader = DataLoader(opensub_dataset, batch_size=opt.
realbatch, collate_fn=PadCollate(dim=0, pad_id=vocab_bulider.padid,
device=device), shuffle=True, num_workers=opt.workers, drop_last=True)
dev_data_dir = './data/imsdb'
imsdb_file_name_list = ['imsdb_lower']
imsdb_dataset = IMSDBDataset(data_dir=dev_data_dir, vocab_bulider=
vocab_bulider, file_name_list=imsdb_file_name_list, save_process=
False, samples=opt.validsamples, add_bos=True, add_eos=True)
print(imsdb_dataset.sample())
imsdb_dataloader = DataLoader(imsdb_dataset, batch_size=1, collate_fn=
PadCollate(dim=0, pad_id=vocab_bulider.padid, device=device),
shuffle=False, num_workers=opt.workers, drop_last=True)
if opt.mine:
model = Transformer(ntoken=len(vocab_bulider), d_model=opt.
embedsize, nhead=opt.nhead, num_encoder_layers=opt.encoderlayer,
num_decoder_layers=opt.decoderlayer, dim_feedforward=opt.
feedforward, postnorm=True, dropout=opt.dropout, gumbels=opt.
gumbels, use_src_mask=False, use_tgt_mask=True, use_memory_mask
=False, activation='relu', use_vocab_attn=False, use_pos_attn=
False, relative_clip=0, highway=False, device=device,
max_sent_length=32, share_input_output_embedding=False,
share_encoder_decoder_embedding=True, share_vocab_embedding=
True, fix_pos_encoding=opt.fix).to(device)
else:
model = TransformerTorch(ntoken=len(vocab_bulider), d_model=opt.
embedsize, nhead=opt.nhead, num_encoder_layers=opt.encoderlayer,
num_decoder_layers=opt.decoderlayer, dim_feedforward=opt.
feedforward, postnorm=True, dropout=opt.dropout, gumbels=opt.
gumbels, use_src_mask=False, use_tgt_mask=False,
use_memory_mask=False, activation='relu', use_vocab_attn=False,
use_pos_attn=False, relative_clip=0, highway=False, device=
device, max_sent_length=32, share_input_output_embedding=False,
share_encoder_decoder_embedding=True, share_vocab_embedding=
True, fix_pos_encoding=opt.fix).to(device)
model.show_graph()
if opt.half:
model = model.half()
if opt.ft:
model = restore_best_state(model, opt.ckpt, save_dir='./save',
device=model.device)
if opt.warmup:
optimizer = RAdam(filter(lambda p: p.requires_grad, model.
parameters()), lr=1.0, betas=(opt.beta1, opt.beta2), eps=opt.eps)
rate_ratio = 1.0 / math.sqrt(opt.embedsize)
scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda
step: rate_ratio * min(1.0 / math.sqrt(step + 1), step * opt.
warmup_step ** -1.5))
else:
optimizer = RAdam(filter(lambda p: p.requires_grad, model.
parameters()), lr=opt.lr, betas=(opt.beta1, opt.beta2), eps=opt
.eps, weight_decay=opt.weight_decay)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=opt.
schedulerstep, gamma=opt.gamma)
criterion = LabelSmoothedCrossEntropyLoss(eps=0.1, ignore_index=
vocab_bulider.padid)
global_train_step, global_valid_step = 0, 0
run_model(model, opensub_dataloader, imsdb_dataloader, opt.niter,
criterion, optimizer, scheduler)
writer.close()
<|reserved_special_token_1|>
import os
import math
import time
from tqdm import tqdm
import torch
from torch import nn
import torch.optim as optim
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils.data import DataLoader
from nag.modules import Transformer, TransformerTorch
from nag.logger import LogManager, SummaryHelper
from nag.metric import BLEUMetric, DistinctNGram
from nag.vocab_helper import VocabBulider
from nag.utils import PadCollate, get_index, restore_best_state, init_seed
from nag.dataset import OpenSubDataset, IMSDBDataset
from nag.optimizer import RAdam
from nag.options import parse_args
from nag.criterion import similarity_regularization, LabelSmoothedCrossEntropyLoss
def train(epoch, model, dataloader, criterion, optimizer, scheduler):
global global_train_step
model.train()
total_loss = 0.
bleu_score = 0.
distinct_1_score, distinct_2_score = 0., 0.
for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0), desc='train', total=len(opensub_dataset)//opt.realbatch):
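        # teacher forcing: the decoder input is tgt[:, :-1] and the training target is tgt[:, 1:]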
tgt_input = tgt[:, :-1]
tgt_gold = tgt[:, 1:]
tgt_lens = tgt_lens - 1
decoder_output_probs, _ = model(
src=src, tgt=tgt_input, src_lengths=src_lens, tgt_lengths=tgt_lens)
decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)
out_seqs = torch.argmax(decoder_output_probs, dim=2)
# loss
loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION
loss.backward()
total_loss += loss.item()
# calculate metrics
bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)
distinct_1_score += distinct_1(out_seqs, tgt_lens)
distinct_2_score += distinct_2(out_seqs, tgt_lens)
# summary writer
global_train_step += 1
writer.log_loss(loss.item()*ACCUMULATION, mode='train')
if (i+1) % ACCUMULATION == 0:
# clip_grad_norm_(model.parameters(), max_norm=5)
optimizer.step()
optimizer.zero_grad()
scheduler.step()
if (i+1) % opt.logstep == 0:
avg_loss = (total_loss / opt.logstep) * ACCUMULATION
avg_bleu = bleu_score / opt.logstep
avg_distinct_1 = distinct_1_score / opt.logstep
avg_distinct_2 = distinct_2_score / opt.logstep
mylogger.log(
i, epoch, model, value=avg_loss, is_train=True,
info=f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}')
total_loss = 0.
bleu_score = 0.
distinct_1_score, distinct_2_score = 0., 0.
show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2], vocab_bulider, global_train_step, mode='train')
def eval(epoch, model, dataloader, criterion, beam_size=2):
global global_valid_step
model.eval()
criterion.eval()
total_loss = 0.
bleu_score = 0.
distinct_1_score, distinct_2_score = 0., 0.
fout = open(os.path.join('./save/' + model_name + '/', model_name + '_' + str(epoch)), 'w', encoding='utf-8')
with torch.no_grad():
for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0), desc='eval', total=len(imsdb_dataset)):
tgt_begin = torch.LongTensor([[vocab_bulider['<bos>']]]).to(device)
tgt_gold = tgt[:, 1:]
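            # the dev loader uses batch_size=1, so decoding runs one dialogue at a time:
            # beam search when beam_size > 1, greedy decoding otherwise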
if beam_size > 1:
output_seqs, output_probs = model.beam_search(
src=src, tgt_begin=tgt_begin, src_length=src_lens,
eos_token_id=vocab_bulider['<eos>'], beam_size=beam_size, max_length=tgt_lens.item())
else:
output_seqs, output_probs = model.greedy(
src=src, tgt_begin=tgt_begin, src_length=src_lens,
eos_token_id=vocab_bulider['<eos>'], max_length=tgt_lens.item())
min_len = min(tgt_gold.shape[1], output_seqs.shape[1])
# loss
loss = criterion(output_probs[:, :min_len, :].permute(0, 2, 1), tgt_gold[:, :min_len])
total_loss += loss.item()
# calculate metrics
out_lens = [min_len]
bleu_score += bleu_metirc(tgt_gold, output_seqs, out_lens)
distinct_1_score += distinct_1(output_seqs, out_lens)
distinct_2_score += distinct_2(output_seqs, out_lens)
# show sequence
global_valid_step += 1
fout.write(' '.join(convert_ids_to_seq(output_seqs[0], vocab_bulider)) + '\n')
if (i+1) % opt.logstep == 0:
show_gen_seq(src, output_seqs, out_lens, tgt_gold, vocab_bulider, global_valid_step, mode='valid')
# summary
avg_loss = total_loss / i
avg_bleu = bleu_score / i
avg_distinct_1 = distinct_1_score / i
avg_distinct_2 = distinct_2_score / i
writer.log_loss(avg_loss, mode='valid')
mylogger.log(
i, epoch, model, value=avg_bleu, is_train=False,
info=f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}')
fout.close()
def run_model(model, train_loader, eval_loader, niter, criterion, optimizer, scheduler):
mylogger.log_info('Running Model')
for i in range(niter):
mylogger.log_info(f'EPOCH: {i}, lr: {optimizer.state_dict()["param_groups"][0]["lr"]}')
train(i, model, train_loader, criterion, optimizer, scheduler)
eval(i, model, eval_loader, criterion, beam_size=opt.beam)
def convert_ids_to_seq(id_seq, vocab_bulider):
return [vocab_bulider.id_to_word(idx) for idx in id_seq]
def show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth, vocab_bulider, step, mode='train'):
for in_id, out_id, out_len, gold_id in zip(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth):
in_seq = convert_ids_to_seq(in_id, vocab_bulider)
out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else out_id, vocab_bulider)
gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)
writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:get_index(in_seq, '<pad>')]), global_step=step)
writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq), global_step=step)
writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:get_index(in_seq, '<pad>')]), global_step=step)
if __name__ == '__main__':
begin_time = time.strftime("%H%M%S", time.localtime())
model_name = 'transformer' + begin_time
opt = parse_args()
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.cuda.set_device(opt.gpuid)
init_seed(opt.manualSeed)
ACCUMULATION = opt.batchsize // opt.realbatch
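    # gradient accumulation: every ACCUMULATION micro-batches of size opt.realbatch
    # form one optimizer step, i.e. an effective batch of opt.batchsize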
mylogger = LogManager(checkpoint_step=10,
save_dir='./save',
model_name=model_name,
log_file_name=model_name + '.log',
mode='max', device=device)
mylogger.save_args(opt)
writer = SummaryHelper(save_dir='./save', model_name=model_name)
train_data_dir = './data/opensubtitles'
# train_data_dir = './data/wmt15en-de'
vocab_file_list = ['dialogue_length3_6.post']
# vocab_file_list = ['all_de-en.bpe.post', 'all_de-en.bpe.response']
vocab_bulider = VocabBulider(
train_data_dir, src_files=vocab_file_list, ignore_unk_error=True,
vocab_file='vocab.txt', min_count=opt.mincount, update=opt.update)
print('most common 50:', vocab_bulider.most_common(50))
mylogger.log_info('vocab size: %d' % len(vocab_bulider))
    # metrics
bleu_metirc = BLEUMetric(vocab_bulider.id2vocab, ignore_smoothing_error=True)
distinct_1 = DistinctNGram(ngram=1)
distinct_2 = DistinctNGram(ngram=2)
# train dataset and dataloader
    if opt.cotk:  # use the dataset format from the 'cotk' toolkit
# opensub_file_name_list = ['all_de-en.bpe']
opensub_file_name_list = ['opensub_pair_dev', 'opensub_pair_test', 'opensub_pair_train']
unk_token = None
else: # use dataset in paper 'Non-Autoregressive Neural Dialogue Generation'
opensub_file_name_list = ['dialogue_length3_6']
unk_token = 'UNknown'
opensub_dataset = OpenSubDataset(
data_dir=train_data_dir, vocab_bulider=vocab_bulider,
file_name_list=opensub_file_name_list, unk_token='UNknown',
save_process=False, samples=opt.trainsamples, add_bos=True, add_eos=True)
print(opensub_dataset.sample())
opensub_dataloader = DataLoader(
opensub_dataset, batch_size=opt.realbatch,
collate_fn=PadCollate(dim=0, pad_id=vocab_bulider.padid, device=device),
shuffle=True, num_workers=opt.workers, drop_last=True)
# dev set
dev_data_dir = './data/imsdb'
imsdb_file_name_list = ['imsdb_lower']
# dev_data_dir = './data/wmt15en-de'
# imsdb_file_name_list = ['newstest']
imsdb_dataset = IMSDBDataset(
data_dir=dev_data_dir, vocab_bulider=vocab_bulider,
file_name_list=imsdb_file_name_list, save_process=False,
samples=opt.validsamples, add_bos=True, add_eos=True)
print(imsdb_dataset.sample())
imsdb_dataloader = DataLoader(
imsdb_dataset, batch_size=1,
collate_fn=PadCollate(dim=0, pad_id=vocab_bulider.padid, device=device),
shuffle=False, num_workers=opt.workers, drop_last=True)
# model definition
if opt.mine:
model = Transformer(
ntoken=len(vocab_bulider), d_model=opt.embedsize, nhead=opt.nhead,
num_encoder_layers=opt.encoderlayer, num_decoder_layers=opt.decoderlayer,
dim_feedforward=opt.feedforward, postnorm=True, dropout=opt.dropout, gumbels=opt.gumbels,
use_src_mask=False, use_tgt_mask=True, use_memory_mask=False,
activation='relu', use_vocab_attn=False, use_pos_attn=False,
relative_clip=0, highway=False, device=device, max_sent_length=32,
share_input_output_embedding=False, share_encoder_decoder_embedding=True,
share_vocab_embedding=True, fix_pos_encoding=opt.fix).to(device)
else:
model = TransformerTorch(
ntoken=len(vocab_bulider), d_model=opt.embedsize, nhead=opt.nhead,
num_encoder_layers=opt.encoderlayer, num_decoder_layers=opt.decoderlayer,
dim_feedforward=opt.feedforward, postnorm=True, dropout=opt.dropout, gumbels=opt.gumbels,
use_src_mask=False, use_tgt_mask=False, use_memory_mask=False,
activation='relu', use_vocab_attn=False, use_pos_attn=False,
relative_clip=0, highway=False, device=device, max_sent_length=32,
share_input_output_embedding=False, share_encoder_decoder_embedding=True,
share_vocab_embedding=True, fix_pos_encoding=opt.fix).to(device)
model.show_graph()
if opt.half:
model = model.half()
if opt.ft:
model = restore_best_state(model, opt.ckpt, save_dir='./save', device=model.device)
# optimizer and scheduler
if opt.warmup:
optimizer = RAdam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=1., betas=(opt.beta1, opt.beta2), eps=opt.eps)
rate_ratio = 1. / math.sqrt(opt.embedsize)
# top_lr = 1 / sqrt(d_model * warmup_step) at step == warmup_step
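        # inverse-square-root ("Noam") warmup schedule, as in the original Transformer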
scheduler = optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda step: rate_ratio * min(1. / math.sqrt(step+1), step*(opt.warmup_step**(-1.5))))
else:
optimizer = RAdam(
filter(lambda p: p.requires_grad, model.parameters()),
lr=opt.lr, betas=(opt.beta1, opt.beta2), eps=opt.eps,
weight_decay=opt.weight_decay)
scheduler = optim.lr_scheduler.StepLR(
optimizer, step_size=opt.schedulerstep, gamma=opt.gamma)
# loss function
# criterion = nn.CrossEntropyLoss(ignore_index=vocab_bulider.padid) # for Transformer
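    # label smoothing (eps=0.1) spreads a little probability mass over
    # non-target tokens to regularize the decoder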
criterion = LabelSmoothedCrossEntropyLoss(eps=0.1, ignore_index=vocab_bulider.padid)
# run model
global_train_step, global_valid_step = 0, 0
run_model(
model, opensub_dataloader, imsdb_dataloader,
opt.niter, criterion, optimizer, scheduler)
writer.close()
|
flexible
|
{
"blob_id": "bc6c3383684cbba775d17f81ead3346fe1a01f90",
"index": 5102,
"step-1": "<mask token>\n\n\ndef train(epoch, model, dataloader, criterion, optimizer, scheduler):\n global global_train_step\n model.train()\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0),\n desc='train', total=len(opensub_dataset) // opt.realbatch):\n tgt_input = tgt[:, :-1]\n tgt_gold = tgt[:, 1:]\n tgt_lens = tgt_lens - 1\n decoder_output_probs, _ = model(src=src, tgt=tgt_input, src_lengths\n =src_lens, tgt_lengths=tgt_lens)\n decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)\n out_seqs = torch.argmax(decoder_output_probs, dim=2)\n loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION\n loss.backward()\n total_loss += loss.item()\n bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)\n distinct_1_score += distinct_1(out_seqs, tgt_lens)\n distinct_2_score += distinct_2(out_seqs, tgt_lens)\n global_train_step += 1\n writer.log_loss(loss.item() * ACCUMULATION, mode='train')\n if (i + 1) % ACCUMULATION == 0:\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n if (i + 1) % opt.logstep == 0:\n avg_loss = total_loss / opt.logstep * ACCUMULATION\n avg_bleu = bleu_score / opt.logstep\n avg_distinct_1 = distinct_1_score / opt.logstep\n avg_distinct_2 = distinct_2_score / opt.logstep\n mylogger.log(i, epoch, model, value=avg_loss, is_train=True,\n info=\n f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'\n )\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2],\n vocab_bulider, global_train_step, mode='train')\n\n\n<mask token>\n\n\ndef run_model(model, train_loader, eval_loader, niter, criterion, optimizer,\n scheduler):\n mylogger.log_info('Running Model')\n for i in range(niter):\n mylogger.log_info(\n f\"EPOCH: {i}, lr: {optimizer.state_dict()['param_groups'][0]['lr']}\"\n )\n train(i, model, train_loader, criterion, optimizer, scheduler)\n eval(i, model, eval_loader, criterion, beam_size=opt.beam)\n\n\n<mask token>\n\n\ndef show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth,\n vocab_bulider, step, mode='train'):\n for in_id, out_id, out_len, gold_id in zip(batch_in_seqs,\n batch_out_seqs, batch_out_lens, groud_truth):\n in_seq = convert_ids_to_seq(in_id, vocab_bulider)\n out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else\n out_id, vocab_bulider)\n gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)\n writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:\n get_index(in_seq, '<pad>')]), global_step=step)\n writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq),\n global_step=step)\n writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:\n get_index(in_seq, '<pad>')]), global_step=step)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef train(epoch, model, dataloader, criterion, optimizer, scheduler):\n global global_train_step\n model.train()\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0),\n desc='train', total=len(opensub_dataset) // opt.realbatch):\n tgt_input = tgt[:, :-1]\n tgt_gold = tgt[:, 1:]\n tgt_lens = tgt_lens - 1\n decoder_output_probs, _ = model(src=src, tgt=tgt_input, src_lengths\n =src_lens, tgt_lengths=tgt_lens)\n decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)\n out_seqs = torch.argmax(decoder_output_probs, dim=2)\n loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION\n loss.backward()\n total_loss += loss.item()\n bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)\n distinct_1_score += distinct_1(out_seqs, tgt_lens)\n distinct_2_score += distinct_2(out_seqs, tgt_lens)\n global_train_step += 1\n writer.log_loss(loss.item() * ACCUMULATION, mode='train')\n if (i + 1) % ACCUMULATION == 0:\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n if (i + 1) % opt.logstep == 0:\n avg_loss = total_loss / opt.logstep * ACCUMULATION\n avg_bleu = bleu_score / opt.logstep\n avg_distinct_1 = distinct_1_score / opt.logstep\n avg_distinct_2 = distinct_2_score / opt.logstep\n mylogger.log(i, epoch, model, value=avg_loss, is_train=True,\n info=\n f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'\n )\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2],\n vocab_bulider, global_train_step, mode='train')\n\n\ndef eval(epoch, model, dataloader, criterion, beam_size=2):\n global global_valid_step\n model.eval()\n criterion.eval()\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n fout = open(os.path.join('./save/' + model_name + '/', model_name + '_' +\n str(epoch)), 'w', encoding='utf-8')\n with torch.no_grad():\n for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader,\n 0), desc='eval', total=len(imsdb_dataset)):\n tgt_begin = torch.LongTensor([[vocab_bulider['<bos>']]]).to(device)\n tgt_gold = tgt[:, 1:]\n if beam_size > 1:\n output_seqs, output_probs = model.beam_search(src=src,\n tgt_begin=tgt_begin, src_length=src_lens, eos_token_id=\n vocab_bulider['<eos>'], beam_size=beam_size, max_length\n =tgt_lens.item())\n else:\n output_seqs, output_probs = model.greedy(src=src, tgt_begin\n =tgt_begin, src_length=src_lens, eos_token_id=\n vocab_bulider['<eos>'], max_length=tgt_lens.item())\n min_len = min(tgt_gold.shape[1], output_seqs.shape[1])\n loss = criterion(output_probs[:, :min_len, :].permute(0, 2, 1),\n tgt_gold[:, :min_len])\n total_loss += loss.item()\n out_lens = [min_len]\n bleu_score += bleu_metirc(tgt_gold, output_seqs, out_lens)\n distinct_1_score += distinct_1(output_seqs, out_lens)\n distinct_2_score += distinct_2(output_seqs, out_lens)\n global_valid_step += 1\n fout.write(' '.join(convert_ids_to_seq(output_seqs[0],\n vocab_bulider)) + '\\n')\n if (i + 1) % opt.logstep == 0:\n show_gen_seq(src, output_seqs, out_lens, tgt_gold,\n vocab_bulider, global_valid_step, mode='valid')\n avg_loss = total_loss / i\n avg_bleu = bleu_score / i\n avg_distinct_1 = distinct_1_score / i\n avg_distinct_2 = distinct_2_score / i\n writer.log_loss(avg_loss, mode='valid')\n mylogger.log(i, epoch, model, value=avg_bleu, 
is_train=False, info=\n f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'\n )\n fout.close()\n\n\ndef run_model(model, train_loader, eval_loader, niter, criterion, optimizer,\n scheduler):\n mylogger.log_info('Running Model')\n for i in range(niter):\n mylogger.log_info(\n f\"EPOCH: {i}, lr: {optimizer.state_dict()['param_groups'][0]['lr']}\"\n )\n train(i, model, train_loader, criterion, optimizer, scheduler)\n eval(i, model, eval_loader, criterion, beam_size=opt.beam)\n\n\n<mask token>\n\n\ndef show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth,\n vocab_bulider, step, mode='train'):\n for in_id, out_id, out_len, gold_id in zip(batch_in_seqs,\n batch_out_seqs, batch_out_lens, groud_truth):\n in_seq = convert_ids_to_seq(in_id, vocab_bulider)\n out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else\n out_id, vocab_bulider)\n gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)\n writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:\n get_index(in_seq, '<pad>')]), global_step=step)\n writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq),\n global_step=step)\n writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:\n get_index(in_seq, '<pad>')]), global_step=step)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef train(epoch, model, dataloader, criterion, optimizer, scheduler):\n global global_train_step\n model.train()\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0),\n desc='train', total=len(opensub_dataset) // opt.realbatch):\n tgt_input = tgt[:, :-1]\n tgt_gold = tgt[:, 1:]\n tgt_lens = tgt_lens - 1\n decoder_output_probs, _ = model(src=src, tgt=tgt_input, src_lengths\n =src_lens, tgt_lengths=tgt_lens)\n decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)\n out_seqs = torch.argmax(decoder_output_probs, dim=2)\n loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION\n loss.backward()\n total_loss += loss.item()\n bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)\n distinct_1_score += distinct_1(out_seqs, tgt_lens)\n distinct_2_score += distinct_2(out_seqs, tgt_lens)\n global_train_step += 1\n writer.log_loss(loss.item() * ACCUMULATION, mode='train')\n if (i + 1) % ACCUMULATION == 0:\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n if (i + 1) % opt.logstep == 0:\n avg_loss = total_loss / opt.logstep * ACCUMULATION\n avg_bleu = bleu_score / opt.logstep\n avg_distinct_1 = distinct_1_score / opt.logstep\n avg_distinct_2 = distinct_2_score / opt.logstep\n mylogger.log(i, epoch, model, value=avg_loss, is_train=True,\n info=\n f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'\n )\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2],\n vocab_bulider, global_train_step, mode='train')\n\n\ndef eval(epoch, model, dataloader, criterion, beam_size=2):\n global global_valid_step\n model.eval()\n criterion.eval()\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n fout = open(os.path.join('./save/' + model_name + '/', model_name + '_' +\n str(epoch)), 'w', encoding='utf-8')\n with torch.no_grad():\n for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader,\n 0), desc='eval', total=len(imsdb_dataset)):\n tgt_begin = torch.LongTensor([[vocab_bulider['<bos>']]]).to(device)\n tgt_gold = tgt[:, 1:]\n if beam_size > 1:\n output_seqs, output_probs = model.beam_search(src=src,\n tgt_begin=tgt_begin, src_length=src_lens, eos_token_id=\n vocab_bulider['<eos>'], beam_size=beam_size, max_length\n =tgt_lens.item())\n else:\n output_seqs, output_probs = model.greedy(src=src, tgt_begin\n =tgt_begin, src_length=src_lens, eos_token_id=\n vocab_bulider['<eos>'], max_length=tgt_lens.item())\n min_len = min(tgt_gold.shape[1], output_seqs.shape[1])\n loss = criterion(output_probs[:, :min_len, :].permute(0, 2, 1),\n tgt_gold[:, :min_len])\n total_loss += loss.item()\n out_lens = [min_len]\n bleu_score += bleu_metirc(tgt_gold, output_seqs, out_lens)\n distinct_1_score += distinct_1(output_seqs, out_lens)\n distinct_2_score += distinct_2(output_seqs, out_lens)\n global_valid_step += 1\n fout.write(' '.join(convert_ids_to_seq(output_seqs[0],\n vocab_bulider)) + '\\n')\n if (i + 1) % opt.logstep == 0:\n show_gen_seq(src, output_seqs, out_lens, tgt_gold,\n vocab_bulider, global_valid_step, mode='valid')\n avg_loss = total_loss / i\n avg_bleu = bleu_score / i\n avg_distinct_1 = distinct_1_score / i\n avg_distinct_2 = distinct_2_score / i\n writer.log_loss(avg_loss, mode='valid')\n mylogger.log(i, epoch, model, value=avg_bleu, 
is_train=False, info=\n f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'\n )\n fout.close()\n\n\ndef run_model(model, train_loader, eval_loader, niter, criterion, optimizer,\n scheduler):\n mylogger.log_info('Running Model')\n for i in range(niter):\n mylogger.log_info(\n f\"EPOCH: {i}, lr: {optimizer.state_dict()['param_groups'][0]['lr']}\"\n )\n train(i, model, train_loader, criterion, optimizer, scheduler)\n eval(i, model, eval_loader, criterion, beam_size=opt.beam)\n\n\ndef convert_ids_to_seq(id_seq, vocab_bulider):\n return [vocab_bulider.id_to_word(idx) for idx in id_seq]\n\n\ndef show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth,\n vocab_bulider, step, mode='train'):\n for in_id, out_id, out_len, gold_id in zip(batch_in_seqs,\n batch_out_seqs, batch_out_lens, groud_truth):\n in_seq = convert_ids_to_seq(in_id, vocab_bulider)\n out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else\n out_id, vocab_bulider)\n gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)\n writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:\n get_index(in_seq, '<pad>')]), global_step=step)\n writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq),\n global_step=step)\n writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:\n get_index(in_seq, '<pad>')]), global_step=step)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef train(epoch, model, dataloader, criterion, optimizer, scheduler):\n global global_train_step\n model.train()\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0),\n desc='train', total=len(opensub_dataset) // opt.realbatch):\n tgt_input = tgt[:, :-1]\n tgt_gold = tgt[:, 1:]\n tgt_lens = tgt_lens - 1\n decoder_output_probs, _ = model(src=src, tgt=tgt_input, src_lengths\n =src_lens, tgt_lengths=tgt_lens)\n decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)\n out_seqs = torch.argmax(decoder_output_probs, dim=2)\n loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION\n loss.backward()\n total_loss += loss.item()\n bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)\n distinct_1_score += distinct_1(out_seqs, tgt_lens)\n distinct_2_score += distinct_2(out_seqs, tgt_lens)\n global_train_step += 1\n writer.log_loss(loss.item() * ACCUMULATION, mode='train')\n if (i + 1) % ACCUMULATION == 0:\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n if (i + 1) % opt.logstep == 0:\n avg_loss = total_loss / opt.logstep * ACCUMULATION\n avg_bleu = bleu_score / opt.logstep\n avg_distinct_1 = distinct_1_score / opt.logstep\n avg_distinct_2 = distinct_2_score / opt.logstep\n mylogger.log(i, epoch, model, value=avg_loss, is_train=True,\n info=\n f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'\n )\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2],\n vocab_bulider, global_train_step, mode='train')\n\n\ndef eval(epoch, model, dataloader, criterion, beam_size=2):\n global global_valid_step\n model.eval()\n criterion.eval()\n total_loss = 0.0\n bleu_score = 0.0\n distinct_1_score, distinct_2_score = 0.0, 0.0\n fout = open(os.path.join('./save/' + model_name + '/', model_name + '_' +\n str(epoch)), 'w', encoding='utf-8')\n with torch.no_grad():\n for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader,\n 0), desc='eval', total=len(imsdb_dataset)):\n tgt_begin = torch.LongTensor([[vocab_bulider['<bos>']]]).to(device)\n tgt_gold = tgt[:, 1:]\n if beam_size > 1:\n output_seqs, output_probs = model.beam_search(src=src,\n tgt_begin=tgt_begin, src_length=src_lens, eos_token_id=\n vocab_bulider['<eos>'], beam_size=beam_size, max_length\n =tgt_lens.item())\n else:\n output_seqs, output_probs = model.greedy(src=src, tgt_begin\n =tgt_begin, src_length=src_lens, eos_token_id=\n vocab_bulider['<eos>'], max_length=tgt_lens.item())\n min_len = min(tgt_gold.shape[1], output_seqs.shape[1])\n loss = criterion(output_probs[:, :min_len, :].permute(0, 2, 1),\n tgt_gold[:, :min_len])\n total_loss += loss.item()\n out_lens = [min_len]\n bleu_score += bleu_metirc(tgt_gold, output_seqs, out_lens)\n distinct_1_score += distinct_1(output_seqs, out_lens)\n distinct_2_score += distinct_2(output_seqs, out_lens)\n global_valid_step += 1\n fout.write(' '.join(convert_ids_to_seq(output_seqs[0],\n vocab_bulider)) + '\\n')\n if (i + 1) % opt.logstep == 0:\n show_gen_seq(src, output_seqs, out_lens, tgt_gold,\n vocab_bulider, global_valid_step, mode='valid')\n avg_loss = total_loss / i\n avg_bleu = bleu_score / i\n avg_distinct_1 = distinct_1_score / i\n avg_distinct_2 = distinct_2_score / i\n writer.log_loss(avg_loss, mode='valid')\n mylogger.log(i, epoch, model, value=avg_bleu, 
is_train=False, info=\n f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}'\n )\n fout.close()\n\n\ndef run_model(model, train_loader, eval_loader, niter, criterion, optimizer,\n scheduler):\n mylogger.log_info('Running Model')\n for i in range(niter):\n mylogger.log_info(\n f\"EPOCH: {i}, lr: {optimizer.state_dict()['param_groups'][0]['lr']}\"\n )\n train(i, model, train_loader, criterion, optimizer, scheduler)\n eval(i, model, eval_loader, criterion, beam_size=opt.beam)\n\n\ndef convert_ids_to_seq(id_seq, vocab_bulider):\n return [vocab_bulider.id_to_word(idx) for idx in id_seq]\n\n\ndef show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth,\n vocab_bulider, step, mode='train'):\n for in_id, out_id, out_len, gold_id in zip(batch_in_seqs,\n batch_out_seqs, batch_out_lens, groud_truth):\n in_seq = convert_ids_to_seq(in_id, vocab_bulider)\n out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else\n out_id, vocab_bulider)\n gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)\n writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:\n get_index(in_seq, '<pad>')]), global_step=step)\n writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq),\n global_step=step)\n writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:\n get_index(in_seq, '<pad>')]), global_step=step)\n\n\nif __name__ == '__main__':\n begin_time = time.strftime('%H%M%S', time.localtime())\n model_name = 'transformer' + begin_time\n opt = parse_args()\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n torch.cuda.set_device(opt.gpuid)\n init_seed(opt.manualSeed)\n ACCUMULATION = opt.batchsize // opt.realbatch\n mylogger = LogManager(checkpoint_step=10, save_dir='./save', model_name\n =model_name, log_file_name=model_name + '.log', mode='max', device=\n device)\n mylogger.save_args(opt)\n writer = SummaryHelper(save_dir='./save', model_name=model_name)\n train_data_dir = './data/opensubtitles'\n vocab_file_list = ['dialogue_length3_6.post']\n vocab_bulider = VocabBulider(train_data_dir, src_files=vocab_file_list,\n ignore_unk_error=True, vocab_file='vocab.txt', min_count=opt.\n mincount, update=opt.update)\n print('most common 50:', vocab_bulider.most_common(50))\n mylogger.log_info('vocab size: %d' % len(vocab_bulider))\n bleu_metirc = BLEUMetric(vocab_bulider.id2vocab, ignore_smoothing_error\n =True)\n distinct_1 = DistinctNGram(ngram=1)\n distinct_2 = DistinctNGram(ngram=2)\n if opt.cotk:\n opensub_file_name_list = ['opensub_pair_dev', 'opensub_pair_test',\n 'opensub_pair_train']\n unk_token = None\n else:\n opensub_file_name_list = ['dialogue_length3_6']\n unk_token = 'UNknown'\n opensub_dataset = OpenSubDataset(data_dir=train_data_dir, vocab_bulider\n =vocab_bulider, file_name_list=opensub_file_name_list, unk_token=\n 'UNknown', save_process=False, samples=opt.trainsamples, add_bos=\n True, add_eos=True)\n print(opensub_dataset.sample())\n opensub_dataloader = DataLoader(opensub_dataset, batch_size=opt.\n realbatch, collate_fn=PadCollate(dim=0, pad_id=vocab_bulider.padid,\n device=device), shuffle=True, num_workers=opt.workers, drop_last=True)\n dev_data_dir = './data/imsdb'\n imsdb_file_name_list = ['imsdb_lower']\n imsdb_dataset = IMSDBDataset(data_dir=dev_data_dir, vocab_bulider=\n vocab_bulider, file_name_list=imsdb_file_name_list, save_process=\n False, samples=opt.validsamples, add_bos=True, add_eos=True)\n print(imsdb_dataset.sample())\n imsdb_dataloader = 
DataLoader(imsdb_dataset, batch_size=1, collate_fn=\n PadCollate(dim=0, pad_id=vocab_bulider.padid, device=device),\n shuffle=False, num_workers=opt.workers, drop_last=True)\n if opt.mine:\n model = Transformer(ntoken=len(vocab_bulider), d_model=opt.\n embedsize, nhead=opt.nhead, num_encoder_layers=opt.encoderlayer,\n num_decoder_layers=opt.decoderlayer, dim_feedforward=opt.\n feedforward, postnorm=True, dropout=opt.dropout, gumbels=opt.\n gumbels, use_src_mask=False, use_tgt_mask=True, use_memory_mask\n =False, activation='relu', use_vocab_attn=False, use_pos_attn=\n False, relative_clip=0, highway=False, device=device,\n max_sent_length=32, share_input_output_embedding=False,\n share_encoder_decoder_embedding=True, share_vocab_embedding=\n True, fix_pos_encoding=opt.fix).to(device)\n else:\n model = TransformerTorch(ntoken=len(vocab_bulider), d_model=opt.\n embedsize, nhead=opt.nhead, num_encoder_layers=opt.encoderlayer,\n num_decoder_layers=opt.decoderlayer, dim_feedforward=opt.\n feedforward, postnorm=True, dropout=opt.dropout, gumbels=opt.\n gumbels, use_src_mask=False, use_tgt_mask=False,\n use_memory_mask=False, activation='relu', use_vocab_attn=False,\n use_pos_attn=False, relative_clip=0, highway=False, device=\n device, max_sent_length=32, share_input_output_embedding=False,\n share_encoder_decoder_embedding=True, share_vocab_embedding=\n True, fix_pos_encoding=opt.fix).to(device)\n model.show_graph()\n if opt.half:\n model = model.half()\n if opt.ft:\n model = restore_best_state(model, opt.ckpt, save_dir='./save',\n device=model.device)\n if opt.warmup:\n optimizer = RAdam(filter(lambda p: p.requires_grad, model.\n parameters()), lr=1.0, betas=(opt.beta1, opt.beta2), eps=opt.eps)\n rate_ratio = 1.0 / math.sqrt(opt.embedsize)\n scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda\n step: rate_ratio * min(1.0 / math.sqrt(step + 1), step * opt.\n warmup_step ** -1.5))\n else:\n optimizer = RAdam(filter(lambda p: p.requires_grad, model.\n parameters()), lr=opt.lr, betas=(opt.beta1, opt.beta2), eps=opt\n .eps, weight_decay=opt.weight_decay)\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=opt.\n schedulerstep, gamma=opt.gamma)\n criterion = LabelSmoothedCrossEntropyLoss(eps=0.1, ignore_index=\n vocab_bulider.padid)\n global_train_step, global_valid_step = 0, 0\n run_model(model, opensub_dataloader, imsdb_dataloader, opt.niter,\n criterion, optimizer, scheduler)\n writer.close()\n",
"step-5": "import os\r\nimport math\r\nimport time\r\nfrom tqdm import tqdm\r\nimport torch\r\nfrom torch import nn\r\nimport torch.optim as optim\r\nfrom torch.nn import functional as F\r\nfrom torch.nn.utils import clip_grad_norm_\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom nag.modules import Transformer, TransformerTorch\r\nfrom nag.logger import LogManager, SummaryHelper\r\nfrom nag.metric import BLEUMetric, DistinctNGram\r\nfrom nag.vocab_helper import VocabBulider\r\nfrom nag.utils import PadCollate, get_index, restore_best_state, init_seed\r\nfrom nag.dataset import OpenSubDataset, IMSDBDataset\r\nfrom nag.optimizer import RAdam\r\nfrom nag.options import parse_args\r\nfrom nag.criterion import similarity_regularization, LabelSmoothedCrossEntropyLoss\r\n\r\n\r\ndef train(epoch, model, dataloader, criterion, optimizer, scheduler):\r\n global global_train_step\r\n model.train()\r\n total_loss = 0.\r\n bleu_score = 0.\r\n distinct_1_score, distinct_2_score = 0., 0.\r\n for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0), desc='train', total=len(opensub_dataset)//opt.realbatch):\r\n tgt_input = tgt[:, :-1]\r\n tgt_gold = tgt[:, 1:]\r\n tgt_lens = tgt_lens - 1\r\n decoder_output_probs, _ = model(\r\n src=src, tgt=tgt_input, src_lengths=src_lens, tgt_lengths=tgt_lens)\r\n decoder_output_probs_T = decoder_output_probs.permute(0, 2, 1)\r\n out_seqs = torch.argmax(decoder_output_probs, dim=2)\r\n # loss\r\n loss = criterion(decoder_output_probs_T, tgt_gold) / ACCUMULATION\r\n loss.backward()\r\n total_loss += loss.item()\r\n # calculate metrics\r\n bleu_score += bleu_metirc(tgt_gold, out_seqs, tgt_lens)\r\n distinct_1_score += distinct_1(out_seqs, tgt_lens)\r\n distinct_2_score += distinct_2(out_seqs, tgt_lens)\r\n # summary writer\r\n global_train_step += 1\r\n writer.log_loss(loss.item()*ACCUMULATION, mode='train')\r\n if (i+1) % ACCUMULATION == 0:\r\n # clip_grad_norm_(model.parameters(), max_norm=5)\r\n optimizer.step()\r\n optimizer.zero_grad()\r\n scheduler.step()\r\n if (i+1) % opt.logstep == 0:\r\n avg_loss = (total_loss / opt.logstep) * ACCUMULATION\r\n avg_bleu = bleu_score / opt.logstep\r\n avg_distinct_1 = distinct_1_score / opt.logstep\r\n avg_distinct_2 = distinct_2_score / opt.logstep\r\n mylogger.log(\r\n i, epoch, model, value=avg_loss, is_train=True,\r\n info=f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}')\r\n total_loss = 0.\r\n bleu_score = 0.\r\n distinct_1_score, distinct_2_score = 0., 0.\r\n show_gen_seq(src[:2], out_seqs[:2], tgt_lens[:2], tgt_gold[:2], vocab_bulider, global_train_step, mode='train')\r\n\r\n\r\ndef eval(epoch, model, dataloader, criterion, beam_size=2):\r\n global global_valid_step\r\n model.eval()\r\n criterion.eval()\r\n total_loss = 0.\r\n bleu_score = 0.\r\n distinct_1_score, distinct_2_score = 0., 0.\r\n fout = open(os.path.join('./save/' + model_name + '/', model_name + '_' + str(epoch)), 'w', encoding='utf-8')\r\n with torch.no_grad():\r\n for i, (src, tgt, src_lens, tgt_lens) in tqdm(enumerate(dataloader, 0), desc='eval', total=len(imsdb_dataset)):\r\n tgt_begin = torch.LongTensor([[vocab_bulider['<bos>']]]).to(device)\r\n tgt_gold = tgt[:, 1:]\r\n if beam_size > 1:\r\n output_seqs, output_probs = model.beam_search(\r\n src=src, tgt_begin=tgt_begin, src_length=src_lens,\r\n eos_token_id=vocab_bulider['<eos>'], beam_size=beam_size, max_length=tgt_lens.item())\r\n else:\r\n output_seqs, output_probs = model.greedy(\r\n src=src, 
tgt_begin=tgt_begin, src_length=src_lens,\r\n eos_token_id=vocab_bulider['<eos>'], max_length=tgt_lens.item())\r\n min_len = min(tgt_gold.shape[1], output_seqs.shape[1])\r\n # loss\r\n loss = criterion(output_probs[:, :min_len, :].permute(0, 2, 1), tgt_gold[:, :min_len])\r\n total_loss += loss.item()\r\n # calculate metrics\r\n out_lens = [min_len]\r\n bleu_score += bleu_metirc(tgt_gold, output_seqs, out_lens)\r\n distinct_1_score += distinct_1(output_seqs, out_lens)\r\n distinct_2_score += distinct_2(output_seqs, out_lens)\r\n # show sequence\r\n global_valid_step += 1\r\n fout.write(' '.join(convert_ids_to_seq(output_seqs[0], vocab_bulider)) + '\\n')\r\n if (i+1) % opt.logstep == 0:\r\n show_gen_seq(src, output_seqs, out_lens, tgt_gold, vocab_bulider, global_valid_step, mode='valid')\r\n # summary\r\n avg_loss = total_loss / i\r\n avg_bleu = bleu_score / i\r\n avg_distinct_1 = distinct_1_score / i\r\n avg_distinct_2 = distinct_2_score / i\r\n writer.log_loss(avg_loss, mode='valid')\r\n mylogger.log(\r\n i, epoch, model, value=avg_bleu, is_train=False,\r\n info=f'loss: {avg_loss:.4f} | ppl: {math.exp(avg_loss):.4f} | BLEU: {avg_bleu:.5f} | d1: {avg_distinct_1:.3f} | d2: {avg_distinct_2:.3f}')\r\n fout.close()\r\n\r\n\r\ndef run_model(model, train_loader, eval_loader, niter, criterion, optimizer, scheduler):\r\n mylogger.log_info('Running Model')\r\n for i in range(niter):\r\n mylogger.log_info(f'EPOCH: {i}, lr: {optimizer.state_dict()[\"param_groups\"][0][\"lr\"]}')\r\n train(i, model, train_loader, criterion, optimizer, scheduler)\r\n eval(i, model, eval_loader, criterion, beam_size=opt.beam)\r\n\r\n\r\ndef convert_ids_to_seq(id_seq, vocab_bulider):\r\n return [vocab_bulider.id_to_word(idx) for idx in id_seq]\r\n\r\n\r\ndef show_gen_seq(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth, vocab_bulider, step, mode='train'):\r\n for in_id, out_id, out_len, gold_id in zip(batch_in_seqs, batch_out_seqs, batch_out_lens, groud_truth):\r\n in_seq = convert_ids_to_seq(in_id, vocab_bulider)\r\n out_seq = convert_ids_to_seq(out_id[:out_len] if out_len > 0 else out_id, vocab_bulider)\r\n gold_seq = convert_ids_to_seq(gold_id, vocab_bulider)\r\n writer.add_text(tag=mode + '_post', sentence=' '.join(in_seq[:get_index(in_seq, '<pad>')]), global_step=step)\r\n writer.add_text(tag=mode + '_pred', sentence=' '.join(out_seq), global_step=step)\r\n writer.add_text(tag=mode + '_reps', sentence=' '.join(gold_seq[:get_index(in_seq, '<pad>')]), global_step=step)\r\n\r\n\r\nif __name__ == '__main__':\r\n begin_time = time.strftime(\"%H%M%S\", time.localtime())\r\n model_name = 'transformer' + begin_time\r\n opt = parse_args()\r\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\r\n torch.cuda.set_device(opt.gpuid)\r\n init_seed(opt.manualSeed)\r\n ACCUMULATION = opt.batchsize // opt.realbatch\r\n\r\n mylogger = LogManager(checkpoint_step=10,\r\n save_dir='./save',\r\n model_name=model_name,\r\n log_file_name=model_name + '.log',\r\n mode='max', device=device)\r\n mylogger.save_args(opt)\r\n writer = SummaryHelper(save_dir='./save', model_name=model_name)\r\n\r\n train_data_dir = './data/opensubtitles'\r\n # train_data_dir = './data/wmt15en-de'\r\n\r\n vocab_file_list = ['dialogue_length3_6.post']\r\n # vocab_file_list = ['all_de-en.bpe.post', 'all_de-en.bpe.response']\r\n vocab_bulider = VocabBulider(\r\n train_data_dir, src_files=vocab_file_list, ignore_unk_error=True,\r\n vocab_file='vocab.txt', min_count=opt.mincount, update=opt.update)\r\n print('most common 50:', 
vocab_bulider.most_common(50))\r\n mylogger.log_info('vocab size: %d' % len(vocab_bulider))\r\n\r\n # metircs\r\n bleu_metirc = BLEUMetric(vocab_bulider.id2vocab, ignore_smoothing_error=True)\r\n distinct_1 = DistinctNGram(ngram=1)\r\n distinct_2 = DistinctNGram(ngram=2)\r\n\r\n # train dataset and dataloader\r\n if opt.cotk: # use dataset in paper 'cotk'\r\n # opensub_file_name_list = ['all_de-en.bpe']\r\n opensub_file_name_list = ['opensub_pair_dev', 'opensub_pair_test', 'opensub_pair_train']\r\n unk_token = None\r\n else: # use dataset in paper 'Non-Autoregressive Neural Dialogue Generation'\r\n opensub_file_name_list = ['dialogue_length3_6']\r\n unk_token = 'UNknown'\r\n opensub_dataset = OpenSubDataset(\r\n data_dir=train_data_dir, vocab_bulider=vocab_bulider,\r\n file_name_list=opensub_file_name_list, unk_token='UNknown',\r\n save_process=False, samples=opt.trainsamples, add_bos=True, add_eos=True)\r\n print(opensub_dataset.sample())\r\n opensub_dataloader = DataLoader(\r\n opensub_dataset, batch_size=opt.realbatch,\r\n collate_fn=PadCollate(dim=0, pad_id=vocab_bulider.padid, device=device),\r\n shuffle=True, num_workers=opt.workers, drop_last=True)\r\n\r\n # dev set\r\n dev_data_dir = './data/imsdb'\r\n imsdb_file_name_list = ['imsdb_lower']\r\n # dev_data_dir = './data/wmt15en-de'\r\n # imsdb_file_name_list = ['newstest']\r\n imsdb_dataset = IMSDBDataset(\r\n data_dir=dev_data_dir, vocab_bulider=vocab_bulider,\r\n file_name_list=imsdb_file_name_list, save_process=False,\r\n samples=opt.validsamples, add_bos=True, add_eos=True)\r\n print(imsdb_dataset.sample())\r\n imsdb_dataloader = DataLoader(\r\n imsdb_dataset, batch_size=1,\r\n collate_fn=PadCollate(dim=0, pad_id=vocab_bulider.padid, device=device),\r\n shuffle=False, num_workers=opt.workers, drop_last=True)\r\n\r\n # model definition\r\n if opt.mine:\r\n model = Transformer(\r\n ntoken=len(vocab_bulider), d_model=opt.embedsize, nhead=opt.nhead,\r\n num_encoder_layers=opt.encoderlayer, num_decoder_layers=opt.decoderlayer,\r\n dim_feedforward=opt.feedforward, postnorm=True, dropout=opt.dropout, gumbels=opt.gumbels,\r\n use_src_mask=False, use_tgt_mask=True, use_memory_mask=False,\r\n activation='relu', use_vocab_attn=False, use_pos_attn=False,\r\n relative_clip=0, highway=False, device=device, max_sent_length=32,\r\n share_input_output_embedding=False, share_encoder_decoder_embedding=True,\r\n share_vocab_embedding=True, fix_pos_encoding=opt.fix).to(device)\r\n else:\r\n model = TransformerTorch(\r\n ntoken=len(vocab_bulider), d_model=opt.embedsize, nhead=opt.nhead,\r\n num_encoder_layers=opt.encoderlayer, num_decoder_layers=opt.decoderlayer,\r\n dim_feedforward=opt.feedforward, postnorm=True, dropout=opt.dropout, gumbels=opt.gumbels,\r\n use_src_mask=False, use_tgt_mask=False, use_memory_mask=False,\r\n activation='relu', use_vocab_attn=False, use_pos_attn=False,\r\n relative_clip=0, highway=False, device=device, max_sent_length=32,\r\n share_input_output_embedding=False, share_encoder_decoder_embedding=True,\r\n share_vocab_embedding=True, fix_pos_encoding=opt.fix).to(device)\r\n model.show_graph()\r\n if opt.half:\r\n model = model.half()\r\n if opt.ft:\r\n model = restore_best_state(model, opt.ckpt, save_dir='./save', device=model.device)\r\n\r\n # optimizer and scheduler\r\n if opt.warmup:\r\n optimizer = RAdam(\r\n filter(lambda p: p.requires_grad, model.parameters()),\r\n lr=1., betas=(opt.beta1, opt.beta2), eps=opt.eps)\r\n rate_ratio = 1. 
/ math.sqrt(opt.embedsize)\r\n # top_lr = 1 / sqrt(d_model * warmup_step) at step == warmup_step\r\n scheduler = optim.lr_scheduler.LambdaLR(\r\n optimizer,\r\n lr_lambda=lambda step: rate_ratio * min(1. / math.sqrt(step+1), step*(opt.warmup_step**(-1.5))))\r\n else:\r\n optimizer = RAdam(\r\n filter(lambda p: p.requires_grad, model.parameters()),\r\n lr=opt.lr, betas=(opt.beta1, opt.beta2), eps=opt.eps,\r\n weight_decay=opt.weight_decay)\r\n scheduler = optim.lr_scheduler.StepLR(\r\n optimizer, step_size=opt.schedulerstep, gamma=opt.gamma)\r\n # loss function\r\n # criterion = nn.CrossEntropyLoss(ignore_index=vocab_bulider.padid) # for Transformer\r\n criterion = LabelSmoothedCrossEntropyLoss(eps=0.1, ignore_index=vocab_bulider.padid)\r\n\r\n # run model\r\n global_train_step, global_valid_step = 0, 0\r\n run_model(\r\n model, opensub_dataloader, imsdb_dataloader,\r\n opt.niter, criterion, optimizer, scheduler)\r\n writer.close()\r\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
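The warmup branch of the transformer training record above builds its learning-rate schedule with torch's LambdaLR. The sketch below isolates that schedule so it can be stepped and inspected on its own; it is not part of the original script, torch.optim.Adam stands in for the project's RAdam, and d_model=512 / warmup_step=4000 plus the dummy parameter are illustrative assumptions only.

import math
import torch
from torch import optim

d_model = 512          # assumed stand-in for opt.embedsize
warmup_step = 4000     # assumed stand-in for opt.warmup_step
rate_ratio = 1.0 / math.sqrt(d_model)

# One dummy parameter lets the optimizer/scheduler pair be driven without a model.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = optim.Adam([param], lr=1.0)   # base lr of 1.0, as in the record
scheduler = optim.lr_scheduler.LambdaLR(
    optimizer,
    lr_lambda=lambda step: rate_ratio * min(1.0 / math.sqrt(step + 1),
                                            step * warmup_step ** -1.5))

for step in range(1, 2 * warmup_step + 1):
    optimizer.step()      # no gradients here, kept only for the correct call order
    scheduler.step()
    if step in (1, warmup_step // 2, warmup_step, 2 * warmup_step):
        print(step, optimizer.param_groups[0]['lr'])

The multiplier rises roughly linearly until step == warmup_step, peaking near 1 / sqrt(d_model * warmup_step) as the comment in the record notes, and then decays as 1 / sqrt(step).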
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
FIGURES_DIR.mkdir(exist_ok=True, parents=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
script_name = pathlib.Path(sys.argv[0]).stem
FIGURES_DIR = pathlib.Path(__file__).parents[2
] / 'figures' / 'simulations' / script_name
FIGURES_DIR.mkdir(exist_ok=True, parents=True)
<|reserved_special_token_1|>
import sys
import pathlib
from matplotlib import pyplot as plt
import matplotlib as mpl
script_name = pathlib.Path(sys.argv[0]).stem
FIGURES_DIR = pathlib.Path(__file__).parents[2
] / 'figures' / 'simulations' / script_name
FIGURES_DIR.mkdir(exist_ok=True, parents=True)
<|reserved_special_token_1|>
import sys
import pathlib
from matplotlib import pyplot as plt
import matplotlib as mpl
script_name = pathlib.Path(sys.argv[0]).stem
FIGURES_DIR = pathlib.Path(
__file__).parents[2] / "figures" / "simulations" / script_name
FIGURES_DIR.mkdir(exist_ok=True, parents=True)
# mpl.rc("text", usetex=True)
# mpl.rc("font", family="serif")
# mpl.rc(
# "text.latex",
# preamble=r"\usepackage{mathpazo} \usepackage{eulervm} \usepackage{amssymb}"
# r"\usepackage{amsmath} \usepackage{bm} \usepackage{DejaVuSans}",
# )
|
flexible
|
{
"blob_id": "fc26574ac8628d7e2896e3e6d055ac61264c7db0",
"index": 1302,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nFIGURES_DIR.mkdir(exist_ok=True, parents=True)\n",
"step-3": "<mask token>\nscript_name = pathlib.Path(sys.argv[0]).stem\nFIGURES_DIR = pathlib.Path(__file__).parents[2\n ] / 'figures' / 'simulations' / script_name\nFIGURES_DIR.mkdir(exist_ok=True, parents=True)\n",
"step-4": "import sys\nimport pathlib\nfrom matplotlib import pyplot as plt\nimport matplotlib as mpl\nscript_name = pathlib.Path(sys.argv[0]).stem\nFIGURES_DIR = pathlib.Path(__file__).parents[2\n ] / 'figures' / 'simulations' / script_name\nFIGURES_DIR.mkdir(exist_ok=True, parents=True)\n",
"step-5": "import sys\nimport pathlib\n\nfrom matplotlib import pyplot as plt\nimport matplotlib as mpl\n\nscript_name = pathlib.Path(sys.argv[0]).stem\nFIGURES_DIR = pathlib.Path(\n __file__).parents[2] / \"figures\" / \"simulations\" / script_name\nFIGURES_DIR.mkdir(exist_ok=True, parents=True)\n\n# mpl.rc(\"text\", usetex=True)\n# mpl.rc(\"font\", family=\"serif\")\n# mpl.rc(\n# \"text.latex\",\n# preamble=r\"\\usepackage{mathpazo} \\usepackage{eulervm} \\usepackage{amssymb}\"\n# r\"\\usepackage{amsmath} \\usepackage{bm} \\usepackage{DejaVuSans}\",\n# )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
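The record above derives a per-script figures directory from the running script's name and the module's own location. The worked example below is not part of the original; it assumes a hypothetical layout <repo>/code/simulations/my_plot.py and, for simplicity, lets one path play the roles of both sys.argv[0] and __file__.

import pathlib

script = pathlib.Path('/home/user/repo/code/simulations/my_plot.py')  # hypothetical

script_name = script.stem                                   # 'my_plot'
figures_dir = script.parents[2] / 'figures' / 'simulations' / script_name
print(figures_dir)    # /home/user/repo/figures/simulations/my_plot
# figures_dir.mkdir(exist_ok=True, parents=True) would then create it on demand.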
from .test_function import *
from .support_funcs import *
table_DIXMAAN = dict()
table_DIXMAAN['A'] = (1, 0, 0.125, 0.125, 0, 0, 0, 0)
table_DIXMAAN['B'] = (1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1)
table_DIXMAAN['C'] = (1, 0.125, 0.125, 0.125, 0, 0, 0, 0)
table_DIXMAAN['D'] = (1, 0.26, 0.26, 0.26, 0, 0, 0, 0)
table_DIXMAAN['E'] = (1, 0, 0.125, 0.125, 1, 0, 0, 1)
table_DIXMAAN['F'] = (1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1)
table_DIXMAAN['G'] = (1, 0.125, 0.125, 0.125, 1, 0, 0, 1)
table_DIXMAAN['H'] = (1, 0.26, 0.26, 0.26, 1, 0, 0, 1)
table_DIXMAAN['I'] = (1, 0, 0.125, 0.125, 2, 0, 0, 2)
table_DIXMAAN['J'] = (1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2)
table_DIXMAAN['K'] = (1, 0.125, 0.125, 0.125, 2, 0, 0, 2)
table_DIXMAAN['L'] = (1, 0.26, 0.26, 0.26, 2, 0, 0, 2)
def DIXMAAN(type):
def DIXMAAN_(n):
name = "DIXMAAN%c function (CUTE)" % type
alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]
m = n // 3
sm = lambda i: alpha * xi(i) ** 2 *(i / n) ** k1
sm2 = lambda i: beta * xi(i) ** 2 * (xi(i+1) + xi(i+1)**2) * (i / n) ** k2
sm3 = lambda i: gamma * xi(i)**2 * xi(i+m) ** 4 * (i / n) ** k3
sm4 = lambda i: sigma * xi(i) * xi(i+2*m) * (i / n) ** k4
f_1 = lambda: sum([sm2(i) for i in range(1, n)])
f_2 = lambda: sum([sm3(i) for i in range(1, 2 * m + 1)])
f_3 = lambda: sum([sm4(i) for i in range(1, m + 1)])
f = lambda: 1 + f_1() + f_2() + f_3()
x0 = np.ones((n, 1)) * 2.0
return create_test_function(name, n, sm, x0, first=f, range_func=default_range_1)
DIXMAAN_.__name__ += type
return DIXMAAN_
|
normal
|
{
"blob_id": "7026f4549019c25cb736af556fe46fd360fba46f",
"index": 2238,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef DIXMAAN(type):\n\n def DIXMAAN_(n):\n name = 'DIXMAAN%c function (CUTE)' % type\n alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]\n m = n // 3\n sm = lambda i: alpha * xi(i) ** 2 * (i / n) ** k1\n sm2 = lambda i: beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i /\n n) ** k2\n sm3 = lambda i: gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3\n sm4 = lambda i: sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4\n f_1 = lambda : sum([sm2(i) for i in range(1, n)])\n f_2 = lambda : sum([sm3(i) for i in range(1, 2 * m + 1)])\n f_3 = lambda : sum([sm4(i) for i in range(1, m + 1)])\n f = lambda : 1 + f_1() + f_2() + f_3()\n x0 = np.ones((n, 1)) * 2.0\n return create_test_function(name, n, sm, x0, first=f, range_func=\n default_range_1)\n DIXMAAN_.__name__ += type\n return DIXMAAN_\n",
"step-3": "<mask token>\ntable_DIXMAAN = dict()\ntable_DIXMAAN['A'] = 1, 0, 0.125, 0.125, 0, 0, 0, 0\ntable_DIXMAAN['B'] = 1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1\ntable_DIXMAAN['C'] = 1, 0.125, 0.125, 0.125, 0, 0, 0, 0\ntable_DIXMAAN['D'] = 1, 0.26, 0.26, 0.26, 0, 0, 0, 0\ntable_DIXMAAN['E'] = 1, 0, 0.125, 0.125, 1, 0, 0, 1\ntable_DIXMAAN['F'] = 1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1\ntable_DIXMAAN['G'] = 1, 0.125, 0.125, 0.125, 1, 0, 0, 1\ntable_DIXMAAN['H'] = 1, 0.26, 0.26, 0.26, 1, 0, 0, 1\ntable_DIXMAAN['I'] = 1, 0, 0.125, 0.125, 2, 0, 0, 2\ntable_DIXMAAN['J'] = 1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2\ntable_DIXMAAN['K'] = 1, 0.125, 0.125, 0.125, 2, 0, 0, 2\ntable_DIXMAAN['L'] = 1, 0.26, 0.26, 0.26, 2, 0, 0, 2\n\n\ndef DIXMAAN(type):\n\n def DIXMAAN_(n):\n name = 'DIXMAAN%c function (CUTE)' % type\n alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]\n m = n // 3\n sm = lambda i: alpha * xi(i) ** 2 * (i / n) ** k1\n sm2 = lambda i: beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i /\n n) ** k2\n sm3 = lambda i: gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3\n sm4 = lambda i: sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4\n f_1 = lambda : sum([sm2(i) for i in range(1, n)])\n f_2 = lambda : sum([sm3(i) for i in range(1, 2 * m + 1)])\n f_3 = lambda : sum([sm4(i) for i in range(1, m + 1)])\n f = lambda : 1 + f_1() + f_2() + f_3()\n x0 = np.ones((n, 1)) * 2.0\n return create_test_function(name, n, sm, x0, first=f, range_func=\n default_range_1)\n DIXMAAN_.__name__ += type\n return DIXMAAN_\n",
"step-4": "from .test_function import *\nfrom .support_funcs import *\ntable_DIXMAAN = dict()\ntable_DIXMAAN['A'] = 1, 0, 0.125, 0.125, 0, 0, 0, 0\ntable_DIXMAAN['B'] = 1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1\ntable_DIXMAAN['C'] = 1, 0.125, 0.125, 0.125, 0, 0, 0, 0\ntable_DIXMAAN['D'] = 1, 0.26, 0.26, 0.26, 0, 0, 0, 0\ntable_DIXMAAN['E'] = 1, 0, 0.125, 0.125, 1, 0, 0, 1\ntable_DIXMAAN['F'] = 1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1\ntable_DIXMAAN['G'] = 1, 0.125, 0.125, 0.125, 1, 0, 0, 1\ntable_DIXMAAN['H'] = 1, 0.26, 0.26, 0.26, 1, 0, 0, 1\ntable_DIXMAAN['I'] = 1, 0, 0.125, 0.125, 2, 0, 0, 2\ntable_DIXMAAN['J'] = 1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2\ntable_DIXMAAN['K'] = 1, 0.125, 0.125, 0.125, 2, 0, 0, 2\ntable_DIXMAAN['L'] = 1, 0.26, 0.26, 0.26, 2, 0, 0, 2\n\n\ndef DIXMAAN(type):\n\n def DIXMAAN_(n):\n name = 'DIXMAAN%c function (CUTE)' % type\n alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]\n m = n // 3\n sm = lambda i: alpha * xi(i) ** 2 * (i / n) ** k1\n sm2 = lambda i: beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i /\n n) ** k2\n sm3 = lambda i: gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3\n sm4 = lambda i: sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4\n f_1 = lambda : sum([sm2(i) for i in range(1, n)])\n f_2 = lambda : sum([sm3(i) for i in range(1, 2 * m + 1)])\n f_3 = lambda : sum([sm4(i) for i in range(1, m + 1)])\n f = lambda : 1 + f_1() + f_2() + f_3()\n x0 = np.ones((n, 1)) * 2.0\n return create_test_function(name, n, sm, x0, first=f, range_func=\n default_range_1)\n DIXMAAN_.__name__ += type\n return DIXMAAN_\n",
"step-5": "from .test_function import *\nfrom .support_funcs import *\n\ntable_DIXMAAN = dict()\ntable_DIXMAAN['A'] = (1, 0, 0.125, 0.125, 0, 0, 0, 0)\ntable_DIXMAAN['B'] = (1, 0.0625, 0.0625, 0.0625, 0, 0, 0, 1)\ntable_DIXMAAN['C'] = (1, 0.125, 0.125, 0.125, 0, 0, 0, 0)\ntable_DIXMAAN['D'] = (1, 0.26, 0.26, 0.26, 0, 0, 0, 0)\ntable_DIXMAAN['E'] = (1, 0, 0.125, 0.125, 1, 0, 0, 1)\ntable_DIXMAAN['F'] = (1, 0.0625, 0.0625, 0.0625, 1, 0, 0, 1)\ntable_DIXMAAN['G'] = (1, 0.125, 0.125, 0.125, 1, 0, 0, 1)\ntable_DIXMAAN['H'] = (1, 0.26, 0.26, 0.26, 1, 0, 0, 1)\ntable_DIXMAAN['I'] = (1, 0, 0.125, 0.125, 2, 0, 0, 2)\ntable_DIXMAAN['J'] = (1, 0.0625, 0.0625, 0.0625, 2, 0, 0, 2)\ntable_DIXMAAN['K'] = (1, 0.125, 0.125, 0.125, 2, 0, 0, 2)\ntable_DIXMAAN['L'] = (1, 0.26, 0.26, 0.26, 2, 0, 0, 2)\n\n\ndef DIXMAAN(type):\n def DIXMAAN_(n):\n name = \"DIXMAAN%c function (CUTE)\" % type\n alpha, beta, gamma, sigma, k1, k2, k3, k4 = table_DIXMAAN[type]\n m = n // 3\n sm = lambda i: alpha * xi(i) ** 2 *(i / n) ** k1\n sm2 = lambda i: beta * xi(i) ** 2 * (xi(i+1) + xi(i+1)**2) * (i / n) ** k2\n sm3 = lambda i: gamma * xi(i)**2 * xi(i+m) ** 4 * (i / n) ** k3\n sm4 = lambda i: sigma * xi(i) * xi(i+2*m) * (i / n) ** k4\n f_1 = lambda: sum([sm2(i) for i in range(1, n)])\n f_2 = lambda: sum([sm3(i) for i in range(1, 2 * m + 1)])\n f_3 = lambda: sum([sm4(i) for i in range(1, m + 1)])\n f = lambda: 1 + f_1() + f_2() + f_3()\n x0 = np.ones((n, 1)) * 2.0\n return create_test_function(name, n, sm, x0, first=f, range_func=default_range_1)\n DIXMAAN_.__name__ += type\n return DIXMAAN_",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
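To make the closures in the DIXMAAN record above concrete, the sketch below evaluates the same three sums with plain NumPy at the record's starting point x0 = 2*ones(n). It is only an illustration: xi is assumed to be 1-indexed access into the current point (the real helper comes from support_funcs and is not shown here), and the alpha-weighted summand sm is omitted because the record passes it to create_test_function separately rather than adding it inside f.

import numpy as np

def dixmaan_partial(x, params):
    # params = (alpha, beta, gamma, sigma, k1, k2, k3, k4), one row of table_DIXMAAN;
    # alpha and k1 belong to the summand handled by create_test_function, so they
    # are unused here.
    alpha, beta, gamma, sigma, k1, k2, k3, k4 = params
    n = len(x)
    m = n // 3
    xi = lambda i: x[i - 1]                      # assumed 1-indexed convention
    f1 = sum(beta * xi(i) ** 2 * (xi(i + 1) + xi(i + 1) ** 2) * (i / n) ** k2
             for i in range(1, n))
    f2 = sum(gamma * xi(i) ** 2 * xi(i + m) ** 4 * (i / n) ** k3
             for i in range(1, 2 * m + 1))
    f3 = sum(sigma * xi(i) * xi(i + 2 * m) * (i / n) ** k4
             for i in range(1, m + 1))
    return 1 + f1 + f2 + f3                      # mirrors f() in the record

x0 = np.ones(9) * 2.0                            # n = 9, so m = 3
print(dixmaan_partial(x0, (1, 0, 0.125, 0.125, 0, 0, 0, 0)))   # DIXMAANA -> 50.5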
<|reserved_special_token_0|>
class Embedding(Module):
def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):
super(Embedding, self).__init__(name=name)
self.embed_nums = embed_nums
self.embed_dims = embed_dims
with utils.scope(name):
self.weight = nn.Parameter(torch.empty(self.embed_nums, self.
embed_dims))
self.add_name(self.weight, 'weight')
if bias:
self.bias = nn.Parameter(torch.zeros(self.embed_dims))
self.add_name(self.bias, 'bias')
else:
self.bias = None
self.reset_parameters()
<|reserved_special_token_0|>
def forward(self, inputs):
outputs = nn.functional.embedding(inputs, self.weight)
if self.bias is not None:
outputs = outputs + self.bias
return outputs
class UnifiedEmbedding(Module):
def __init__(self, params, pos_embed=None, type_embed=False, layer_norm
=False, dropout=0.0, scale=False, name='embedding'):
super(UnifiedEmbedding, self).__init__(name=name)
self.pos_embed = pos_embed
self.type_embed = type_embed
self.vocab_size = len(params.vocabulary['source'])
self.embedding_size = params.embedding_size
self.layer_norm = None
self.out_dropout = None
self.scale = scale
if dropout > 0:
self.out_dropout = nn.Dropout(p=dropout)
with utils.scope(name):
self.word_embeddings = Embedding(self.vocab_size, self.
embedding_size, name='word_embedding')
if self.pos_embed is not None:
if self.pos_embed == 'learnable':
self.pos_embeddings = Embedding(params.max_pos, self.
embedding_size, name='pos_embedding')
elif self.pos_embed == 'functional':
self.pos_embeddings = PositionalEmbedding()
else:
raise ValueError('Unsupported position embedding: %s' %
pos_embed)
if self.type_embed:
self.type_embeddings = Embedding(params.type_vocab_size,
self.embedding_size, name='type_embedding')
if layer_norm:
self.layer_norm = LayerNorm(self.embedding_size, eps=params
.layer_norm_eps)
def resize_word_embedding(self, new_vocab_size):
old_embeddings = self.word_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=
'word_embedding').to(old_embeddings.weight)
new_embeddings.reset_parameters()
new_embeddings.weight.data[:old_num_tokens, :
] = old_embeddings.weight.data
self.word_embeddings = new_embeddings
self.vocab_size = new_vocab_size
def forward(self, input_ids, token_type_ids=None, position_ids=None):
inp_shape = input_ids.size()
inp_length = inp_shape[1]
inputs = self.word_embeddings(input_ids)
if self.scale:
inputs = inputs * self.embedding_size ** 0.5
if self.pos_embed is not None:
if self.pos_embed == 'learnable':
if position_ids is None:
position_ids = torch.arange(inp_length).to(input_ids)
position_ids = position_ids.unsqueeze(0).expand_as(
input_ids)
inputs = inputs + self.pos_embeddings(position_ids)
elif self.pos_embed == 'functional':
inputs = self.pos_embeddings(inputs)
if self.type_embed:
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
inputs = inputs + self.type_embeddings(token_type_ids)
if self.layer_norm is not None:
inputs = self.layer_norm(inputs)
if self.out_dropout is not None:
inputs = self.out_dropout(inputs)
return inputs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PositionalEmbedding(torch.nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Embedding(Module):
def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):
super(Embedding, self).__init__(name=name)
self.embed_nums = embed_nums
self.embed_dims = embed_dims
with utils.scope(name):
self.weight = nn.Parameter(torch.empty(self.embed_nums, self.
embed_dims))
self.add_name(self.weight, 'weight')
if bias:
self.bias = nn.Parameter(torch.zeros(self.embed_dims))
self.add_name(self.bias, 'bias')
else:
self.bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0.0, std=self.embed_dims ** -0.5)
def forward(self, inputs):
outputs = nn.functional.embedding(inputs, self.weight)
if self.bias is not None:
outputs = outputs + self.bias
return outputs
class UnifiedEmbedding(Module):
def __init__(self, params, pos_embed=None, type_embed=False, layer_norm
=False, dropout=0.0, scale=False, name='embedding'):
super(UnifiedEmbedding, self).__init__(name=name)
self.pos_embed = pos_embed
self.type_embed = type_embed
self.vocab_size = len(params.vocabulary['source'])
self.embedding_size = params.embedding_size
self.layer_norm = None
self.out_dropout = None
self.scale = scale
if dropout > 0:
self.out_dropout = nn.Dropout(p=dropout)
with utils.scope(name):
self.word_embeddings = Embedding(self.vocab_size, self.
embedding_size, name='word_embedding')
if self.pos_embed is not None:
if self.pos_embed == 'learnable':
self.pos_embeddings = Embedding(params.max_pos, self.
embedding_size, name='pos_embedding')
elif self.pos_embed == 'functional':
self.pos_embeddings = PositionalEmbedding()
else:
raise ValueError('Unsupported position embedding: %s' %
pos_embed)
if self.type_embed:
self.type_embeddings = Embedding(params.type_vocab_size,
self.embedding_size, name='type_embedding')
if layer_norm:
self.layer_norm = LayerNorm(self.embedding_size, eps=params
.layer_norm_eps)
def resize_word_embedding(self, new_vocab_size):
old_embeddings = self.word_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=
'word_embedding').to(old_embeddings.weight)
new_embeddings.reset_parameters()
new_embeddings.weight.data[:old_num_tokens, :
] = old_embeddings.weight.data
self.word_embeddings = new_embeddings
self.vocab_size = new_vocab_size
def forward(self, input_ids, token_type_ids=None, position_ids=None):
inp_shape = input_ids.size()
inp_length = inp_shape[1]
inputs = self.word_embeddings(input_ids)
if self.scale:
inputs = inputs * self.embedding_size ** 0.5
if self.pos_embed is not None:
if self.pos_embed == 'learnable':
if position_ids is None:
position_ids = torch.arange(inp_length).to(input_ids)
position_ids = position_ids.unsqueeze(0).expand_as(
input_ids)
inputs = inputs + self.pos_embeddings(position_ids)
elif self.pos_embed == 'functional':
inputs = self.pos_embeddings(inputs)
if self.type_embed:
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
inputs = inputs + self.type_embeddings(token_type_ids)
if self.layer_norm is not None:
inputs = self.layer_norm(inputs)
if self.out_dropout is not None:
inputs = self.out_dropout(inputs)
return inputs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PositionalEmbedding(torch.nn.Module):
<|reserved_special_token_0|>
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError('The rank of input must be 3.')
length = inputs.shape[1]
channels = inputs.shape[2]
half_dim = channels // 2
positions = torch.arange(length, dtype=inputs.dtype, device=inputs.
device)
dimensions = torch.arange(half_dim, dtype=inputs.dtype, device=
inputs.device)
scale = math.log(10000.0) / float(half_dim - 1)
dimensions.mul_(-scale).exp_()
scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
dim=1)
if channels % 2 == 1:
pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
device=inputs.device)
signal = torch.cat([signal, pad], axis=1)
return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)
class Embedding(Module):
def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):
super(Embedding, self).__init__(name=name)
self.embed_nums = embed_nums
self.embed_dims = embed_dims
with utils.scope(name):
self.weight = nn.Parameter(torch.empty(self.embed_nums, self.
embed_dims))
self.add_name(self.weight, 'weight')
if bias:
self.bias = nn.Parameter(torch.zeros(self.embed_dims))
self.add_name(self.bias, 'bias')
else:
self.bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0.0, std=self.embed_dims ** -0.5)
def forward(self, inputs):
outputs = nn.functional.embedding(inputs, self.weight)
if self.bias is not None:
outputs = outputs + self.bias
return outputs
class UnifiedEmbedding(Module):
def __init__(self, params, pos_embed=None, type_embed=False, layer_norm
=False, dropout=0.0, scale=False, name='embedding'):
super(UnifiedEmbedding, self).__init__(name=name)
self.pos_embed = pos_embed
self.type_embed = type_embed
self.vocab_size = len(params.vocabulary['source'])
self.embedding_size = params.embedding_size
self.layer_norm = None
self.out_dropout = None
self.scale = scale
if dropout > 0:
self.out_dropout = nn.Dropout(p=dropout)
with utils.scope(name):
self.word_embeddings = Embedding(self.vocab_size, self.
embedding_size, name='word_embedding')
if self.pos_embed is not None:
if self.pos_embed == 'learnable':
self.pos_embeddings = Embedding(params.max_pos, self.
embedding_size, name='pos_embedding')
elif self.pos_embed == 'functional':
self.pos_embeddings = PositionalEmbedding()
else:
raise ValueError('Unsupported position embedding: %s' %
pos_embed)
if self.type_embed:
self.type_embeddings = Embedding(params.type_vocab_size,
self.embedding_size, name='type_embedding')
if layer_norm:
self.layer_norm = LayerNorm(self.embedding_size, eps=params
.layer_norm_eps)
def resize_word_embedding(self, new_vocab_size):
old_embeddings = self.word_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=
'word_embedding').to(old_embeddings.weight)
new_embeddings.reset_parameters()
new_embeddings.weight.data[:old_num_tokens, :
] = old_embeddings.weight.data
self.word_embeddings = new_embeddings
self.vocab_size = new_vocab_size
def forward(self, input_ids, token_type_ids=None, position_ids=None):
inp_shape = input_ids.size()
inp_length = inp_shape[1]
inputs = self.word_embeddings(input_ids)
if self.scale:
inputs = inputs * self.embedding_size ** 0.5
if self.pos_embed is not None:
if self.pos_embed == 'learnable':
if position_ids is None:
position_ids = torch.arange(inp_length).to(input_ids)
position_ids = position_ids.unsqueeze(0).expand_as(
input_ids)
inputs = inputs + self.pos_embeddings(position_ids)
elif self.pos_embed == 'functional':
inputs = self.pos_embeddings(inputs)
if self.type_embed:
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
inputs = inputs + self.type_embeddings(token_type_ids)
if self.layer_norm is not None:
inputs = self.layer_norm(inputs)
if self.out_dropout is not None:
inputs = self.out_dropout(inputs)
return inputs
<|reserved_special_token_1|>
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import thuctc.utils as utils
from thuctc.modules.module import Module
from thuctc.modules.layer_norm import LayerNorm
class PositionalEmbedding(torch.nn.Module):
def __init__(self):
super(PositionalEmbedding, self).__init__()
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError('The rank of input must be 3.')
length = inputs.shape[1]
channels = inputs.shape[2]
half_dim = channels // 2
positions = torch.arange(length, dtype=inputs.dtype, device=inputs.
device)
dimensions = torch.arange(half_dim, dtype=inputs.dtype, device=
inputs.device)
scale = math.log(10000.0) / float(half_dim - 1)
dimensions.mul_(-scale).exp_()
scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
dim=1)
if channels % 2 == 1:
pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
device=inputs.device)
signal = torch.cat([signal, pad], axis=1)
return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)
class Embedding(Module):
def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):
super(Embedding, self).__init__(name=name)
self.embed_nums = embed_nums
self.embed_dims = embed_dims
with utils.scope(name):
self.weight = nn.Parameter(torch.empty(self.embed_nums, self.
embed_dims))
self.add_name(self.weight, 'weight')
if bias:
self.bias = nn.Parameter(torch.zeros(self.embed_dims))
self.add_name(self.bias, 'bias')
else:
self.bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0.0, std=self.embed_dims ** -0.5)
def forward(self, inputs):
outputs = nn.functional.embedding(inputs, self.weight)
if self.bias is not None:
outputs = outputs + self.bias
return outputs
class UnifiedEmbedding(Module):
def __init__(self, params, pos_embed=None, type_embed=False, layer_norm
=False, dropout=0.0, scale=False, name='embedding'):
super(UnifiedEmbedding, self).__init__(name=name)
self.pos_embed = pos_embed
self.type_embed = type_embed
self.vocab_size = len(params.vocabulary['source'])
self.embedding_size = params.embedding_size
self.layer_norm = None
self.out_dropout = None
self.scale = scale
if dropout > 0:
self.out_dropout = nn.Dropout(p=dropout)
with utils.scope(name):
self.word_embeddings = Embedding(self.vocab_size, self.
embedding_size, name='word_embedding')
if self.pos_embed is not None:
if self.pos_embed == 'learnable':
self.pos_embeddings = Embedding(params.max_pos, self.
embedding_size, name='pos_embedding')
elif self.pos_embed == 'functional':
self.pos_embeddings = PositionalEmbedding()
else:
raise ValueError('Unsupported position embedding: %s' %
pos_embed)
if self.type_embed:
self.type_embeddings = Embedding(params.type_vocab_size,
self.embedding_size, name='type_embedding')
if layer_norm:
self.layer_norm = LayerNorm(self.embedding_size, eps=params
.layer_norm_eps)
def resize_word_embedding(self, new_vocab_size):
old_embeddings = self.word_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=
'word_embedding').to(old_embeddings.weight)
new_embeddings.reset_parameters()
new_embeddings.weight.data[:old_num_tokens, :
] = old_embeddings.weight.data
self.word_embeddings = new_embeddings
self.vocab_size = new_vocab_size
def forward(self, input_ids, token_type_ids=None, position_ids=None):
inp_shape = input_ids.size()
inp_length = inp_shape[1]
inputs = self.word_embeddings(input_ids)
if self.scale:
inputs = inputs * self.embedding_size ** 0.5
if self.pos_embed is not None:
if self.pos_embed == 'learnable':
if position_ids is None:
position_ids = torch.arange(inp_length).to(input_ids)
position_ids = position_ids.unsqueeze(0).expand_as(
input_ids)
inputs = inputs + self.pos_embeddings(position_ids)
elif self.pos_embed == 'functional':
inputs = self.pos_embeddings(inputs)
if self.type_embed:
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
inputs = inputs + self.type_embeddings(token_type_ids)
if self.layer_norm is not None:
inputs = self.layer_norm(inputs)
if self.out_dropout is not None:
inputs = self.out_dropout(inputs)
return inputs
<|reserved_special_token_1|>
# coding=utf-8
# Copyright 2021-Present The THUCTC Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import torch
import torch.nn as nn
import thuctc.utils as utils
from thuctc.modules.module import Module
from thuctc.modules.layer_norm import LayerNorm
class PositionalEmbedding(torch.nn.Module):
def __init__(self):
super(PositionalEmbedding, self).__init__()
def forward(self, inputs):
if inputs.dim() != 3:
raise ValueError("The rank of input must be 3.")
length = inputs.shape[1]
channels = inputs.shape[2]
half_dim = channels // 2
positions = torch.arange(length, dtype=inputs.dtype,
device=inputs.device)
dimensions = torch.arange(half_dim, dtype=inputs.dtype,
device=inputs.device)
scale = math.log(10000.0) / float(half_dim - 1)
dimensions.mul_(-scale).exp_()
scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
dim=1)
if channels % 2 == 1:
pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,
device=inputs.device)
signal = torch.cat([signal, pad], axis=1)
return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)
class Embedding(Module):
def __init__(self, embed_nums, embed_dims, bias=False, name="embedding"):
super(Embedding, self).__init__(name=name)
self.embed_nums = embed_nums
self.embed_dims = embed_dims
with utils.scope(name):
self.weight = nn.Parameter(
torch.empty(self.embed_nums, self.embed_dims))
self.add_name(self.weight, "weight")
if bias:
self.bias = nn.Parameter(
torch.zeros(self.embed_dims))
self.add_name(self.bias, "bias")
else:
self.bias = None
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.weight, mean=0.0,
std=self.embed_dims ** -0.5)
def forward(self, inputs):
outputs = nn.functional.embedding(inputs, self.weight)
if self.bias is not None:
outputs = outputs + self.bias
return outputs
class UnifiedEmbedding(Module):
def __init__(self, params, pos_embed=None, type_embed=False,
layer_norm=False, dropout=0.0, scale=False, name="embedding"):
super(UnifiedEmbedding, self).__init__(name=name)
self.pos_embed = pos_embed
self.type_embed = type_embed
self.vocab_size = len(params.vocabulary["source"])
self.embedding_size = params.embedding_size
self.layer_norm = None
self.out_dropout = None
self.scale = scale
if dropout > 0:
self.out_dropout = nn.Dropout(p=dropout)
with utils.scope(name):
self.word_embeddings = Embedding(self.vocab_size,
self.embedding_size,
name="word_embedding")
if self.pos_embed is not None:
if self.pos_embed == "learnable":
self.pos_embeddings = Embedding(params.max_pos,
self.embedding_size,
name="pos_embedding")
elif self.pos_embed == "functional":
self.pos_embeddings = PositionalEmbedding()
else:
raise ValueError("Unsupported position "
"embedding: %s" % pos_embed)
if self.type_embed:
self.type_embeddings = Embedding(params.type_vocab_size,
self.embedding_size,
name="type_embedding")
if layer_norm:
self.layer_norm = LayerNorm(self.embedding_size,
eps=params.layer_norm_eps)
def resize_word_embedding(self, new_vocab_size):
old_embeddings = self.word_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
new_embeddings = Embedding(new_vocab_size,
old_embedding_dim,
name="word_embedding").to(old_embeddings.weight)
new_embeddings.reset_parameters()
new_embeddings.weight.data[:old_num_tokens, :] = old_embeddings.weight.data
self.word_embeddings = new_embeddings
self.vocab_size = new_vocab_size
def forward(self, input_ids, token_type_ids=None, position_ids=None):
inp_shape = input_ids.size()
inp_length = inp_shape[1]
inputs = self.word_embeddings(input_ids)
if self.scale:
inputs = inputs * (self.embedding_size ** 0.5)
if self.pos_embed is not None:
if self.pos_embed == "learnable":
if position_ids is None:
position_ids = torch.arange(inp_length).to(input_ids)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
inputs = inputs + self.pos_embeddings(position_ids)
elif self.pos_embed == "functional":
inputs = self.pos_embeddings(inputs)
if self.type_embed:
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
inputs = inputs + self.type_embeddings(token_type_ids)
if self.layer_norm is not None:
inputs = self.layer_norm(inputs)
if self.out_dropout is not None:
inputs = self.out_dropout(inputs)
return inputs
|
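For an input with d channels, the sinusoidal PositionalEmbedding in the module above adds sin(p * exp(-j * ln(10000) / (d/2 - 1))) to channel j and the matching cosine to channel j + d/2, for position p and j < d/2. The short sketch below is not part of the original module; it recomputes that signal directly so shapes and values can be checked, and the commented assertion assumes PositionalEmbedding is importable from this module.

import math
import torch

length, channels = 4, 8
half = channels // 2
positions = torch.arange(length, dtype=torch.float32)
dims = torch.exp(-torch.arange(half, dtype=torch.float32)
                 * (math.log(10000.0) / float(half - 1)))
angles = positions.unsqueeze(1) * dims.unsqueeze(0)           # (length, half)
signal = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
print(signal.shape)                                           # torch.Size([4, 8])

# Passing zeros through the module should return exactly this signal, broadcast
# over the batch dimension:
# pe = PositionalEmbedding()
# assert torch.allclose(pe(torch.zeros(1, length, channels))[0], signal)

UnifiedEmbedding then layers this table (or a learnable position embedding) on top of the word and optional token-type embeddings, followed by the optional layer norm and dropout.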
flexible
|
{
"blob_id": "c773b273ad6953bf9c74b11c44aff16e9fd0860e",
"index": 3468,
"step-1": "<mask token>\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):\n super(Embedding, self).__init__(name=name)\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n with utils.scope(name):\n self.weight = nn.Parameter(torch.empty(self.embed_nums, self.\n embed_dims))\n self.add_name(self.weight, 'weight')\n if bias:\n self.bias = nn.Parameter(torch.zeros(self.embed_dims))\n self.add_name(self.bias, 'bias')\n else:\n self.bias = None\n self.reset_parameters()\n <mask token>\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n if self.bias is not None:\n outputs = outputs + self.bias\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False, layer_norm\n =False, dropout=0.0, scale=False, name='embedding'):\n super(UnifiedEmbedding, self).__init__(name=name)\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary['source'])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size, self.\n embedding_size, name='word_embedding')\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n self.pos_embeddings = Embedding(params.max_pos, self.\n embedding_size, name='pos_embedding')\n elif self.pos_embed == 'functional':\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError('Unsupported position embedding: %s' %\n pos_embed)\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size, name='type_embedding')\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size, eps=params\n .layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size):\n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=\n 'word_embedding').to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :\n ] = old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n inputs = self.word_embeddings(input_ids)\n if self.scale:\n inputs = inputs * self.embedding_size ** 0.5\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(\n input_ids)\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == 'functional':\n inputs = self.pos_embeddings(inputs)\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n inputs = inputs + self.type_embeddings(token_type_ids)\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n return inputs\n",
"step-2": "<mask token>\n\n\nclass PositionalEmbedding(torch.nn.Module):\n <mask token>\n <mask token>\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):\n super(Embedding, self).__init__(name=name)\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n with utils.scope(name):\n self.weight = nn.Parameter(torch.empty(self.embed_nums, self.\n embed_dims))\n self.add_name(self.weight, 'weight')\n if bias:\n self.bias = nn.Parameter(torch.zeros(self.embed_dims))\n self.add_name(self.bias, 'bias')\n else:\n self.bias = None\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.weight, mean=0.0, std=self.embed_dims ** -0.5)\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n if self.bias is not None:\n outputs = outputs + self.bias\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False, layer_norm\n =False, dropout=0.0, scale=False, name='embedding'):\n super(UnifiedEmbedding, self).__init__(name=name)\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary['source'])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size, self.\n embedding_size, name='word_embedding')\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n self.pos_embeddings = Embedding(params.max_pos, self.\n embedding_size, name='pos_embedding')\n elif self.pos_embed == 'functional':\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError('Unsupported position embedding: %s' %\n pos_embed)\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size, name='type_embedding')\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size, eps=params\n .layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size):\n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=\n 'word_embedding').to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :\n ] = old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n inputs = self.word_embeddings(input_ids)\n if self.scale:\n inputs = inputs * self.embedding_size ** 0.5\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(\n input_ids)\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == 'functional':\n inputs = self.pos_embeddings(inputs)\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n inputs = inputs + self.type_embeddings(token_type_ids)\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n return inputs\n",
"step-3": "<mask token>\n\n\nclass PositionalEmbedding(torch.nn.Module):\n <mask token>\n\n def forward(self, inputs):\n if inputs.dim() != 3:\n raise ValueError('The rank of input must be 3.')\n length = inputs.shape[1]\n channels = inputs.shape[2]\n half_dim = channels // 2\n positions = torch.arange(length, dtype=inputs.dtype, device=inputs.\n device)\n dimensions = torch.arange(half_dim, dtype=inputs.dtype, device=\n inputs.device)\n scale = math.log(10000.0) / float(half_dim - 1)\n dimensions.mul_(-scale).exp_()\n scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],\n dim=1)\n if channels % 2 == 1:\n pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,\n device=inputs.device)\n signal = torch.cat([signal, pad], axis=1)\n return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):\n super(Embedding, self).__init__(name=name)\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n with utils.scope(name):\n self.weight = nn.Parameter(torch.empty(self.embed_nums, self.\n embed_dims))\n self.add_name(self.weight, 'weight')\n if bias:\n self.bias = nn.Parameter(torch.zeros(self.embed_dims))\n self.add_name(self.bias, 'bias')\n else:\n self.bias = None\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.weight, mean=0.0, std=self.embed_dims ** -0.5)\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n if self.bias is not None:\n outputs = outputs + self.bias\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False, layer_norm\n =False, dropout=0.0, scale=False, name='embedding'):\n super(UnifiedEmbedding, self).__init__(name=name)\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary['source'])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size, self.\n embedding_size, name='word_embedding')\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n self.pos_embeddings = Embedding(params.max_pos, self.\n embedding_size, name='pos_embedding')\n elif self.pos_embed == 'functional':\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError('Unsupported position embedding: %s' %\n pos_embed)\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size, name='type_embedding')\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size, eps=params\n .layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size):\n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=\n 'word_embedding').to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :\n ] = old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n inputs = self.word_embeddings(input_ids)\n if self.scale:\n inputs = inputs * 
self.embedding_size ** 0.5\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(\n input_ids)\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == 'functional':\n inputs = self.pos_embeddings(inputs)\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n inputs = inputs + self.type_embeddings(token_type_ids)\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n return inputs\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport math\nimport torch\nimport torch.nn as nn\nimport thuctc.utils as utils\nfrom thuctc.modules.module import Module\nfrom thuctc.modules.layer_norm import LayerNorm\n\n\nclass PositionalEmbedding(torch.nn.Module):\n\n def __init__(self):\n super(PositionalEmbedding, self).__init__()\n\n def forward(self, inputs):\n if inputs.dim() != 3:\n raise ValueError('The rank of input must be 3.')\n length = inputs.shape[1]\n channels = inputs.shape[2]\n half_dim = channels // 2\n positions = torch.arange(length, dtype=inputs.dtype, device=inputs.\n device)\n dimensions = torch.arange(half_dim, dtype=inputs.dtype, device=\n inputs.device)\n scale = math.log(10000.0) / float(half_dim - 1)\n dimensions.mul_(-scale).exp_()\n scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],\n dim=1)\n if channels % 2 == 1:\n pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,\n device=inputs.device)\n signal = torch.cat([signal, pad], axis=1)\n return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name='embedding'):\n super(Embedding, self).__init__(name=name)\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n with utils.scope(name):\n self.weight = nn.Parameter(torch.empty(self.embed_nums, self.\n embed_dims))\n self.add_name(self.weight, 'weight')\n if bias:\n self.bias = nn.Parameter(torch.zeros(self.embed_dims))\n self.add_name(self.bias, 'bias')\n else:\n self.bias = None\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.weight, mean=0.0, std=self.embed_dims ** -0.5)\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n if self.bias is not None:\n outputs = outputs + self.bias\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False, layer_norm\n =False, dropout=0.0, scale=False, name='embedding'):\n super(UnifiedEmbedding, self).__init__(name=name)\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary['source'])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size, self.\n embedding_size, name='word_embedding')\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n self.pos_embeddings = Embedding(params.max_pos, self.\n embedding_size, name='pos_embedding')\n elif self.pos_embed == 'functional':\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError('Unsupported position embedding: %s' %\n pos_embed)\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size, name='type_embedding')\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size, eps=params\n .layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size):\n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size, old_embedding_dim, name=\n 'word_embedding').to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :\n ] = 
old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n inputs = self.word_embeddings(input_ids)\n if self.scale:\n inputs = inputs * self.embedding_size ** 0.5\n if self.pos_embed is not None:\n if self.pos_embed == 'learnable':\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(\n input_ids)\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == 'functional':\n inputs = self.pos_embeddings(inputs)\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n inputs = inputs + self.type_embeddings(token_type_ids)\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n return inputs\n",
"step-5": "# coding=utf-8\n# Copyright 2021-Present The THUCTC Authors\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport torch\n\nimport torch.nn as nn\nimport thuctc.utils as utils\n\nfrom thuctc.modules.module import Module\nfrom thuctc.modules.layer_norm import LayerNorm\n\n\nclass PositionalEmbedding(torch.nn.Module):\n\n def __init__(self):\n super(PositionalEmbedding, self).__init__()\n\n def forward(self, inputs):\n if inputs.dim() != 3:\n raise ValueError(\"The rank of input must be 3.\")\n\n length = inputs.shape[1]\n channels = inputs.shape[2]\n half_dim = channels // 2\n\n positions = torch.arange(length, dtype=inputs.dtype,\n device=inputs.device)\n dimensions = torch.arange(half_dim, dtype=inputs.dtype,\n device=inputs.device)\n\n scale = math.log(10000.0) / float(half_dim - 1)\n dimensions.mul_(-scale).exp_()\n\n scaled_time = positions.unsqueeze(1) * dimensions.unsqueeze(0)\n signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],\n dim=1)\n\n if channels % 2 == 1:\n pad = torch.zeros([signal.shape[0], 1], dtype=inputs.dtype,\n device=inputs.device)\n signal = torch.cat([signal, pad], axis=1)\n\n return inputs + torch.reshape(signal, [1, -1, channels]).to(inputs)\n\n\nclass Embedding(Module):\n\n def __init__(self, embed_nums, embed_dims, bias=False, name=\"embedding\"):\n super(Embedding, self).__init__(name=name)\n\n self.embed_nums = embed_nums\n self.embed_dims = embed_dims\n\n with utils.scope(name):\n self.weight = nn.Parameter(\n torch.empty(self.embed_nums, self.embed_dims))\n self.add_name(self.weight, \"weight\")\n\n if bias:\n self.bias = nn.Parameter(\n torch.zeros(self.embed_dims))\n self.add_name(self.bias, \"bias\")\n else:\n self.bias = None\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.normal_(self.weight, mean=0.0,\n std=self.embed_dims ** -0.5)\n\n def forward(self, inputs):\n outputs = nn.functional.embedding(inputs, self.weight)\n\n if self.bias is not None:\n outputs = outputs + self.bias\n\n return outputs\n\n\nclass UnifiedEmbedding(Module):\n\n def __init__(self, params, pos_embed=None, type_embed=False,\n layer_norm=False, dropout=0.0, scale=False, name=\"embedding\"):\n super(UnifiedEmbedding, self).__init__(name=name)\n\n self.pos_embed = pos_embed\n self.type_embed = type_embed\n self.vocab_size = len(params.vocabulary[\"source\"])\n self.embedding_size = params.embedding_size\n self.layer_norm = None\n self.out_dropout = None\n self.scale = scale\n\n if dropout > 0:\n self.out_dropout = nn.Dropout(p=dropout)\n\n with utils.scope(name):\n self.word_embeddings = Embedding(self.vocab_size,\n self.embedding_size,\n name=\"word_embedding\")\n\n if self.pos_embed is not None:\n if self.pos_embed == \"learnable\":\n self.pos_embeddings = Embedding(params.max_pos,\n self.embedding_size,\n name=\"pos_embedding\")\n elif self.pos_embed == \"functional\":\n self.pos_embeddings = PositionalEmbedding()\n else:\n raise ValueError(\"Unsupported position \"\n \"embedding: %s\" % pos_embed)\n\n if self.type_embed:\n self.type_embeddings = Embedding(params.type_vocab_size,\n self.embedding_size,\n name=\"type_embedding\")\n\n if layer_norm:\n self.layer_norm = LayerNorm(self.embedding_size,\n eps=params.layer_norm_eps)\n\n def resize_word_embedding(self, new_vocab_size): \n old_embeddings = self.word_embeddings\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n new_embeddings = Embedding(new_vocab_size,\n 
old_embedding_dim,\n name=\"word_embedding\").to(old_embeddings.weight)\n new_embeddings.reset_parameters()\n new_embeddings.weight.data[:old_num_tokens, :] = old_embeddings.weight.data\n self.word_embeddings = new_embeddings\n self.vocab_size = new_vocab_size\n\n def forward(self, input_ids, token_type_ids=None, position_ids=None):\n inp_shape = input_ids.size()\n inp_length = inp_shape[1]\n\n inputs = self.word_embeddings(input_ids)\n\n if self.scale:\n inputs = inputs * (self.embedding_size ** 0.5)\n\n if self.pos_embed is not None:\n if self.pos_embed == \"learnable\":\n if position_ids is None:\n position_ids = torch.arange(inp_length).to(input_ids)\n position_ids = position_ids.unsqueeze(0).expand_as(input_ids)\n\n inputs = inputs + self.pos_embeddings(position_ids)\n elif self.pos_embed == \"functional\":\n inputs = self.pos_embeddings(inputs)\n\n if self.type_embed:\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n inputs = inputs + self.type_embeddings(token_type_ids)\n\n if self.layer_norm is not None:\n inputs = self.layer_norm(inputs)\n\n if self.out_dropout is not None:\n inputs = self.out_dropout(inputs)\n\n return inputs\n",
"step-ids": [
7,
9,
10,
12,
13
]
}
|
[
7,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
class StaticBox(wx.Dialog):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StaticBox(wx.Dialog):
def __init__(self, parent, id, title):
wx.Dialog.__init__(self, parent, id, title, size=(250, 230))
wx.StaticBox(self, -1, 'Personal Info', (5, 5), size=(240, 170))
wx.CheckBox(self, -1, 'Male', (15, 30))
wx.CheckBox(self, -1, 'Married', (15, 55))
wx.StaticText(self, -1, 'Age', (15, 95))
wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min=1, max=120)
wx.Button(self, 1, 'Ok', (90, 185), (60, -1))
self.Bind(wx.EVT_BUTTON, self.OnClose, id=1)
self.Center()
self.ShowModal()
self.Destroy()
def OnClose(self, event):
self.Close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class StaticBox(wx.Dialog):
def __init__(self, parent, id, title):
wx.Dialog.__init__(self, parent, id, title, size=(250, 230))
wx.StaticBox(self, -1, 'Personal Info', (5, 5), size=(240, 170))
wx.CheckBox(self, -1, 'Male', (15, 30))
wx.CheckBox(self, -1, 'Married', (15, 55))
wx.StaticText(self, -1, 'Age', (15, 95))
wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min=1, max=120)
wx.Button(self, 1, 'Ok', (90, 185), (60, -1))
self.Bind(wx.EVT_BUTTON, self.OnClose, id=1)
self.Center()
self.ShowModal()
self.Destroy()
def OnClose(self, event):
self.Close()
if __name__ == '__main__':
app = wx.App()
StaticBox(None, -1, 'staticbox.py')
app.MainLoop()
<|reserved_special_token_1|>
import wx
class StaticBox(wx.Dialog):
def __init__(self, parent, id, title):
wx.Dialog.__init__(self, parent, id, title, size=(250, 230))
wx.StaticBox(self, -1, 'Personal Info', (5, 5), size=(240, 170))
wx.CheckBox(self, -1, 'Male', (15, 30))
wx.CheckBox(self, -1, 'Married', (15, 55))
wx.StaticText(self, -1, 'Age', (15, 95))
wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min=1, max=120)
wx.Button(self, 1, 'Ok', (90, 185), (60, -1))
self.Bind(wx.EVT_BUTTON, self.OnClose, id=1)
self.Center()
self.ShowModal()
self.Destroy()
def OnClose(self, event):
self.Close()
if __name__ == '__main__':
app = wx.App()
StaticBox(None, -1, 'staticbox.py')
app.MainLoop()
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# staticbox.py
import wx
class StaticBox(wx.Dialog):
def __init__(self, parent, id, title):
wx.Dialog.__init__(self, parent, id, title, size = (250, 230))
wx.StaticBox(self, -1, 'Personal Info', (5, 5), size = (240, 170))
wx.CheckBox(self, -1, 'Male', (15, 30))
wx.CheckBox(self, -1, 'Married', (15, 55))
wx.StaticText(self, -1, 'Age', (15, 95))
wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min = 1, max = 120)
wx.Button(self, 1, 'Ok', (90, 185), (60, -1))
self.Bind(wx.EVT_BUTTON, self.OnClose, id = 1)
self.Center()
self.ShowModal()
self.Destroy()
def OnClose(self, event):
self.Close()
if __name__ == '__main__':
app = wx.App()
StaticBox(None, -1, 'staticbox.py')
app.MainLoop()
|
flexible
|
{
"blob_id": "96bf6220bfc884e3a19f70a63d9ecba449e2e7e2",
"index": 6108,
"step-1": "<mask token>\n\n\nclass StaticBox(wx.Dialog):\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass StaticBox(wx.Dialog):\n\n def __init__(self, parent, id, title):\n wx.Dialog.__init__(self, parent, id, title, size=(250, 230))\n wx.StaticBox(self, -1, 'Personal Info', (5, 5), size=(240, 170))\n wx.CheckBox(self, -1, 'Male', (15, 30))\n wx.CheckBox(self, -1, 'Married', (15, 55))\n wx.StaticText(self, -1, 'Age', (15, 95))\n wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min=1, max=120)\n wx.Button(self, 1, 'Ok', (90, 185), (60, -1))\n self.Bind(wx.EVT_BUTTON, self.OnClose, id=1)\n self.Center()\n self.ShowModal()\n self.Destroy()\n\n def OnClose(self, event):\n self.Close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass StaticBox(wx.Dialog):\n\n def __init__(self, parent, id, title):\n wx.Dialog.__init__(self, parent, id, title, size=(250, 230))\n wx.StaticBox(self, -1, 'Personal Info', (5, 5), size=(240, 170))\n wx.CheckBox(self, -1, 'Male', (15, 30))\n wx.CheckBox(self, -1, 'Married', (15, 55))\n wx.StaticText(self, -1, 'Age', (15, 95))\n wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min=1, max=120)\n wx.Button(self, 1, 'Ok', (90, 185), (60, -1))\n self.Bind(wx.EVT_BUTTON, self.OnClose, id=1)\n self.Center()\n self.ShowModal()\n self.Destroy()\n\n def OnClose(self, event):\n self.Close()\n\n\nif __name__ == '__main__':\n app = wx.App()\n StaticBox(None, -1, 'staticbox.py')\n app.MainLoop()\n",
"step-4": "import wx\n\n\nclass StaticBox(wx.Dialog):\n\n def __init__(self, parent, id, title):\n wx.Dialog.__init__(self, parent, id, title, size=(250, 230))\n wx.StaticBox(self, -1, 'Personal Info', (5, 5), size=(240, 170))\n wx.CheckBox(self, -1, 'Male', (15, 30))\n wx.CheckBox(self, -1, 'Married', (15, 55))\n wx.StaticText(self, -1, 'Age', (15, 95))\n wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min=1, max=120)\n wx.Button(self, 1, 'Ok', (90, 185), (60, -1))\n self.Bind(wx.EVT_BUTTON, self.OnClose, id=1)\n self.Center()\n self.ShowModal()\n self.Destroy()\n\n def OnClose(self, event):\n self.Close()\n\n\nif __name__ == '__main__':\n app = wx.App()\n StaticBox(None, -1, 'staticbox.py')\n app.MainLoop()\n",
"step-5": "#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n\n# staticbox.py\n\nimport wx\n\nclass StaticBox(wx.Dialog):\n def __init__(self, parent, id, title):\n wx.Dialog.__init__(self, parent, id, title, size = (250, 230))\n\n wx.StaticBox(self, -1, 'Personal Info', (5, 5), size = (240, 170))\n wx.CheckBox(self, -1, 'Male', (15, 30))\n wx.CheckBox(self, -1, 'Married', (15, 55))\n wx.StaticText(self, -1, 'Age', (15, 95))\n wx.SpinCtrl(self, -1, '1', (55, 90), (60, -1), min = 1, max = 120)\n wx.Button(self, 1, 'Ok', (90, 185), (60, -1))\n\n self.Bind(wx.EVT_BUTTON, self.OnClose, id = 1)\n\n self.Center()\n self.ShowModal()\n self.Destroy()\n\n def OnClose(self, event):\n self.Close()\n\nif __name__ == '__main__':\n app = wx.App()\n StaticBox(None, -1, 'staticbox.py')\n app.MainLoop()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def my_logistic(x, a, b, c):
return c / (1 + a * np.exp(-b * x))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.use('Agg')
<|reserved_special_token_0|>
df_mrns.sort_values('COLLECTION_DT', inplace=True)
df_mrns.drop_duplicates('MRN', keep='first', inplace=True)
<|reserved_special_token_0|>
df_mrns_not_variant.sort_values('COLLECTION_DT', inplace=True)
df_mrns_not_variant.drop_duplicates('MRN', keep='first', inplace=True)
<|reserved_special_token_0|>
df_2.drop_duplicates('MRN', keep='first', inplace=True)
<|reserved_special_token_0|>
df.sort_values('COLLECTION_DT', inplace=True)
df.variant.fillna(0, inplace=True)
<|reserved_special_token_0|>
df_data.to_excel('final_Data_' + tag + '_log_growth_6_28_2021.xlsx', index=
False)
def my_logistic(x, a, b, c):
return c / (1 + a * np.exp(-b * x))
<|reserved_special_token_0|>
plt.scatter(x, y)
plt.plot(x, my_logistic(x, a, b, c))
<|reserved_special_token_0|>
plt.plot(xprime, yprime)
plt.savefig('log_fit_best_fit' + tag + '.png')
plt.close()
<|reserved_special_token_0|>
for i, p, var in zip(range(n), pars, np.diag(pcov)):
sigma = var ** 0.5
if i == 1:
val_dw = p - sigma * tval
val_up = p + sigma * tval
print('p{0}: {1} [{2} {3}]'.format(i, p, p - sigma * tval, p + sigma *
tval))
plt.plot(x, y, 'bo', markersize=5, label='Observed')
<|reserved_special_token_0|>
plt.plot(xprime, yprime, label='Predicted')
<|reserved_special_token_0|>
plt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label=
'95% CI')
plt.title('Logistic growth model [' + tag + ']', fontsize=18)
plt.xlabel('Days since ' + days_since, fontsize=15)
plt.ylabel('Percent of patients ', fontsize=15)
plt.legend()
plt.savefig('log_pred_best_fit' + tag + '.png')
plt.close()
<|reserved_special_token_0|>
print(dt)
<|reserved_special_token_0|>
print(dt)
<|reserved_special_token_0|>
print(dt)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.use('Agg')
<|reserved_special_token_0|>
params = read_run_params()
run = params['current_run']
out_home = params['container'] + 'output/'
out_dir = out_home + run + '/'
df = pd.read_csv(out_dir +
'4_mcov_strain_variant_map_covid_pangolin_db_input_' + run + '.csv')
df = df[df.quality == 'HQ']
tag = 'B.1.617.Family'
voi = ['B.1.617.2', 'AY.2', 'AY.3']
start_date = '4-15-2021'
end_date = '7-20-2021'
days_since = '4/15/2021'
days = 180
keep_mrns_variant = np.unique(df[df.variant.isin(voi)]['MRN'])
df_mrns = df[df.MRN.isin(keep_mrns_variant)]
df_mrns = df_mrns[df_mrns.variant.isin(voi)]
df_mrns.sort_values('COLLECTION_DT', inplace=True)
df_mrns.drop_duplicates('MRN', keep='first', inplace=True)
keep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)]['MRN'])
df_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]
df_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin
(voi)]
df_mrns_not_variant.sort_values('COLLECTION_DT', inplace=True)
df_mrns_not_variant.drop_duplicates('MRN', keep='first', inplace=True)
df_2 = df_mrns.append(df_mrns_not_variant)
df_2.drop_duplicates('MRN', keep='first', inplace=True)
df = df_2
df = df[['MCoVNumber', 'COLLECTION_DT', 'variant']]
df.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)
df.COLLECTION_DT = df.COLLECTION_DT.dt.date
df = df[(df.COLLECTION_DT >= pd.to_datetime(start_date)) & (df.
COLLECTION_DT < pd.to_datetime(end_date))]
df.sort_values('COLLECTION_DT', inplace=True)
df.variant.fillna(0, inplace=True)
df.variant = [(1 if x in voi else 0) for x in df.variant]
df_variant = df.groupby('COLLECTION_DT')['variant'].agg('sum').reset_index()
df_count = df.groupby('COLLECTION_DT')['variant'].agg('count').reset_index()
dates = pd.date_range(df.COLLECTION_DT.min(), df.COLLECTION_DT.max() +
timedelta(days=1) - timedelta(days=1), freq='d')
df_data = pd.DataFrame(dates)
df_data.columns = ['dates']
df_data['date_step'] = [x for x in range(1, df_data.shape[0] + 1, 1)]
df_data['total'] = df_count.variant
df_data['variant'] = df_variant.variant
df_data['variant_csum'] = np.cumsum(df_variant.variant.values)
df_data['variant_percent'] = [(x / y * 100) for x, y in zip(df_data.variant,
df_data.total)]
df_data.to_excel('final_Data_' + tag + '_log_growth_6_28_2021.xlsx', index=
False)
def my_logistic(x, a, b, c):
return c / (1 + a * np.exp(-b * x))
x = np.array(df_data.date_step)
y = np.array(df_data.variant_percent)
po = np.random.exponential(size=3)
bounds = 0, [1000.0, 2.0, 100.0]
(a, b, c), cov = optim.curve_fit(my_logistic, x, y, bounds=bounds, p0=po)
plt.scatter(x, y)
plt.plot(x, my_logistic(x, a, b, c))
xprime = np.array([x for x in range(1, 170, 1)])
yprime = my_logistic(xprime, a, b, c)
plt.plot(xprime, yprime)
plt.savefig('log_fit_best_fit' + tag + '.png')
plt.close()
<|reserved_special_token_0|>
pars, pcov = (a, b, c), cov
alpha = 0.05
n = len(y)
p = len(pars)
dof = max(0, n - p)
tval = t.ppf(1.0 - alpha / 2.0, dof)
val_dw = 0
val_up = 0
for i, p, var in zip(range(n), pars, np.diag(pcov)):
sigma = var ** 0.5
if i == 1:
val_dw = p - sigma * tval
val_up = p + sigma * tval
print('p{0}: {1} [{2} {3}]'.format(i, p, p - sigma * tval, p + sigma *
tval))
plt.plot(x, y, 'bo', markersize=5, label='Observed')
xprime = np.array([x for x in range(1, days, 1)])
yprime = my_logistic(xprime, a, b, c)
plt.plot(xprime, yprime, label='Predicted')
xpred = np.array([x for x in range(1, days, 1)])
ypred_dw = my_logistic(xpred, pars[0], val_dw, pars[2])
ypred_up = my_logistic(xpred, pars[0], val_up, pars[2])
plt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label=
'95% CI')
plt.title('Logistic growth model [' + tag + ']', fontsize=18)
plt.xlabel('Days since ' + days_since, fontsize=15)
plt.ylabel('Percent of patients ', fontsize=15)
plt.legend()
plt.savefig('log_pred_best_fit' + tag + '.png')
plt.close()
gr = b
dt = 70 / (gr * 100)
print(dt)
gr = val_up
dt = 70 / (gr * 100)
print(dt)
gr = val_dw
dt = 70 / (gr * 100)
print(dt)
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
from datetime import timedelta
import scipy.optimize as optim
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gen_utils.gen_io import read_run_params, log_msg
params = read_run_params()
run = params['current_run']
out_home = params['container'] + 'output/'
out_dir = out_home + run + '/'
df = pd.read_csv(out_dir +
'4_mcov_strain_variant_map_covid_pangolin_db_input_' + run + '.csv')
df = df[df.quality == 'HQ']
tag = 'B.1.617.Family'
voi = ['B.1.617.2', 'AY.2', 'AY.3']
start_date = '4-15-2021'
end_date = '7-20-2021'
days_since = '4/15/2021'
days = 180
keep_mrns_variant = np.unique(df[df.variant.isin(voi)]['MRN'])
df_mrns = df[df.MRN.isin(keep_mrns_variant)]
df_mrns = df_mrns[df_mrns.variant.isin(voi)]
df_mrns.sort_values('COLLECTION_DT', inplace=True)
df_mrns.drop_duplicates('MRN', keep='first', inplace=True)
keep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)]['MRN'])
df_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]
df_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin
(voi)]
df_mrns_not_variant.sort_values('COLLECTION_DT', inplace=True)
df_mrns_not_variant.drop_duplicates('MRN', keep='first', inplace=True)
df_2 = df_mrns.append(df_mrns_not_variant)
df_2.drop_duplicates('MRN', keep='first', inplace=True)
df = df_2
df = df[['MCoVNumber', 'COLLECTION_DT', 'variant']]
df.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)
df.COLLECTION_DT = df.COLLECTION_DT.dt.date
df = df[(df.COLLECTION_DT >= pd.to_datetime(start_date)) & (df.
COLLECTION_DT < pd.to_datetime(end_date))]
df.sort_values('COLLECTION_DT', inplace=True)
df.variant.fillna(0, inplace=True)
df.variant = [(1 if x in voi else 0) for x in df.variant]
df_variant = df.groupby('COLLECTION_DT')['variant'].agg('sum').reset_index()
df_count = df.groupby('COLLECTION_DT')['variant'].agg('count').reset_index()
dates = pd.date_range(df.COLLECTION_DT.min(), df.COLLECTION_DT.max() +
timedelta(days=1) - timedelta(days=1), freq='d')
df_data = pd.DataFrame(dates)
df_data.columns = ['dates']
df_data['date_step'] = [x for x in range(1, df_data.shape[0] + 1, 1)]
df_data['total'] = df_count.variant
df_data['variant'] = df_variant.variant
df_data['variant_csum'] = np.cumsum(df_variant.variant.values)
df_data['variant_percent'] = [(x / y * 100) for x, y in zip(df_data.variant,
df_data.total)]
df_data.to_excel('final_Data_' + tag + '_log_growth_6_28_2021.xlsx', index=
False)
def my_logistic(x, a, b, c):
return c / (1 + a * np.exp(-b * x))
x = np.array(df_data.date_step)
y = np.array(df_data.variant_percent)
po = np.random.exponential(size=3)
bounds = 0, [1000.0, 2.0, 100.0]
(a, b, c), cov = optim.curve_fit(my_logistic, x, y, bounds=bounds, p0=po)
plt.scatter(x, y)
plt.plot(x, my_logistic(x, a, b, c))
xprime = np.array([x for x in range(1, 170, 1)])
yprime = my_logistic(xprime, a, b, c)
plt.plot(xprime, yprime)
plt.savefig('log_fit_best_fit' + tag + '.png')
plt.close()
from scipy.stats.distributions import t
pars, pcov = (a, b, c), cov
alpha = 0.05
n = len(y)
p = len(pars)
dof = max(0, n - p)
tval = t.ppf(1.0 - alpha / 2.0, dof)
val_dw = 0
val_up = 0
for i, p, var in zip(range(n), pars, np.diag(pcov)):
sigma = var ** 0.5
if i == 1:
val_dw = p - sigma * tval
val_up = p + sigma * tval
print('p{0}: {1} [{2} {3}]'.format(i, p, p - sigma * tval, p + sigma *
tval))
plt.plot(x, y, 'bo', markersize=5, label='Observed')
xprime = np.array([x for x in range(1, days, 1)])
yprime = my_logistic(xprime, a, b, c)
plt.plot(xprime, yprime, label='Predicted')
xpred = np.array([x for x in range(1, days, 1)])
ypred_dw = my_logistic(xpred, pars[0], val_dw, pars[2])
ypred_up = my_logistic(xpred, pars[0], val_up, pars[2])
plt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label=
'95% CI')
plt.title('Logistic growth model [' + tag + ']', fontsize=18)
plt.xlabel('Days since ' + days_since, fontsize=15)
plt.ylabel('Percent of patients ', fontsize=15)
plt.legend()
plt.savefig('log_pred_best_fit' + tag + '.png')
plt.close()
gr = b
dt = 70 / (gr * 100)
print(dt)
gr = val_up
dt = 70 / (gr * 100)
print(dt)
gr = val_dw
dt = 70 / (gr * 100)
print(dt)
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
from datetime import timedelta
import scipy.optimize as optim
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gen_utils.gen_io import read_run_params,log_msg
#############################################
params = read_run_params()
run = params["current_run"]
out_home = params["container"]+"output/"
out_dir = out_home+run+"/"
df = pd.read_csv(out_dir+"4_mcov_strain_variant_map_covid_pangolin_db_input_"+run+".csv")
df = df[df.quality=="HQ"]
#########################
tag="B.1.617.Family"
voi=["B.1.617.2","AY.2","AY.3"]
start_date = "4-15-2021"
end_date = "7-20-2021"
days_since="4/15/2021"
days= 180
# voi="P.1"
# start_date = "1-1-2021"
# end_date = "6-20-2021"
# days_since="1/1/2021"
# days= 360
#################################
###take unique patients with variant
keep_mrns_variant = np.unique(df[df.variant.isin(voi)]["MRN"])
df_mrns = df[df.MRN.isin(keep_mrns_variant)]
df_mrns = df_mrns[df_mrns.variant.isin(voi)] ###important step--keep only records matching the variants of interest (voi)
df_mrns.sort_values("COLLECTION_DT",inplace=True)
df_mrns.drop_duplicates("MRN",keep="first",inplace=True)
keep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)]["MRN"])
df_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]
df_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin(voi)]
df_mrns_not_variant.sort_values("COLLECTION_DT",inplace=True)
df_mrns_not_variant.drop_duplicates("MRN",keep="first",inplace=True)
df_2 = df_mrns.append(df_mrns_not_variant)
df_2.drop_duplicates("MRN",keep="first",inplace=True)
df = df_2
df=df[['MCoVNumber','COLLECTION_DT','variant']]
#####################################
df.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)
df.COLLECTION_DT = df.COLLECTION_DT.dt.date
df = df[ ( (df.COLLECTION_DT>=pd.to_datetime(start_date)) &
(df.COLLECTION_DT<pd.to_datetime(end_date))
)
]
df.sort_values("COLLECTION_DT",inplace=True)
df.variant.fillna(0,inplace=True)
#########################
df.variant = [1 if x in voi else 0 for x in df.variant]
df_variant = df.groupby("COLLECTION_DT")["variant"].agg("sum").reset_index()
df_count = df.groupby("COLLECTION_DT")["variant"].agg("count").reset_index()
dates = pd.date_range(df.COLLECTION_DT.min(), (df.COLLECTION_DT.max() + timedelta(days=1) )-timedelta(days=1),freq='d')
df_data = pd.DataFrame(dates)
df_data.columns=["dates"]
df_data["date_step"]= [x for x in range(1,df_data.shape[0]+1,1)]
df_data["total"] = df_count.variant
df_data["variant"] = df_variant.variant
df_data["variant_csum"] = np.cumsum(df_variant.variant.values)
df_data["variant_percent"]=[ (x/y)*100 for x,y in zip(df_data.variant,df_data.total)]
df_data.to_excel("final_Data_"+tag+"_log_growth_6_28_2021.xlsx",index=False)
def my_logistic(x,a,b,c):
return c/(1 + a * np.exp(-b*x))
x = np.array(df_data.date_step)
# y = np.array(df_data.variant_csum)
y = np.array(df_data.variant_percent)
##########optimize
po = np.random.exponential(size=3)
bounds = (0,[1000.,2.0,100.])
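# Logistic model c/(1 + a*exp(-b*x)): a sets the initial offset, b the daily growth rate,
# and c the saturation level; c is bounded at 100 because y is a percentage.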
(a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)
# for i in range(1,20,1):
# try:
# # po = np.array([250.,0.10,99.])
# po= np.random.exponential(size=3)
# bounds = ([0.,0.1,0.],[1000.,float(i),100.])
# (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)
# print(c)
# except:
# print("error for " + str(i))
# po = np.array([250.,0.10,99.])
# bounds = ([0.,0.1,99.],[1000.,1.0,100.])
# (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)
plt.scatter(x,y)
plt.plot(x,my_logistic(x,a,b,c))
xprime = np.array([x for x in range(1,170,1)])
yprime = my_logistic(xprime,a,b,c)
plt.plot(xprime,yprime)
plt.savefig("log_fit_best_fit"+tag+".png")
plt.close()
############################## method 2 using t distribution on error --> prefer this one
from scipy.stats.distributions import t
pars, pcov = (a,b,c),cov
alpha = 0.05 # 95% confidence interval = 100*(1-alpha)
n = len(y) # number of data points
p = len(pars) # number of parameters
dof = max(0, n - p) # number of degrees of freedom
# student-t value for the dof and confidence level
tval = t.ppf(1.0-alpha/2., dof)
val_dw = 0
val_up = 0
for i, p,var in zip(range(n), pars, np.diag(pcov)):
sigma = var**0.5
if i==1:
val_dw = p - sigma*tval
val_up = p + sigma*tval
print ('p{0}: {1} [{2} {3}]'.format(i, p,
p - sigma*tval,
p + sigma*tval))
plt.plot(x,y,'bo',markersize=5,label='Observed')
xprime = np.array([x for x in range(1,days,1)])
yprime = my_logistic(xprime,a,b,c)
plt.plot(xprime,yprime,label='Predicted')
xpred = np.array([x for x in range(1,days,1)])
ypred_dw = my_logistic(xpred,pars[0],val_dw,pars[2])
ypred_up = my_logistic(xpred,pars[0],val_up,pars[2])
plt.fill_between(xpred, ypred_up,ypred_dw,color = 'k', alpha = 0.1,label='95% CI')
plt.title("Logistic growth model ["+tag+"]",fontsize=18)
plt.xlabel("Days since "+days_since,fontsize=15)
plt.ylabel("Percent of patients ",fontsize=15)
plt.legend()
plt.savefig("log_pred_best_fit"+tag+".png")
plt.close()
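# Doubling time from the fitted growth rate b (and its 95% CI bounds val_up/val_dw),
# using the rule-of-70 approximation: dt ≈ 70 / (growth rate in % per day) ≈ ln(2)/b.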
gr=b;dt = 70/(gr*100);print(dt)
gr=val_up;dt = 70/(gr*100);print(dt)
gr=val_dw;dt = 70/(gr*100);print(dt)
|
flexible
|
{
"blob_id": "dcef5f34a62939d992a109e991552e612bf5bad5",
"index": 4619,
"step-1": "<mask token>\n\n\ndef my_logistic(x, a, b, c):\n return c / (1 + a * np.exp(-b * x))\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\ndf_mrns.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns.drop_duplicates('MRN', keep='first', inplace=True)\n<mask token>\ndf_mrns_not_variant.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns_not_variant.drop_duplicates('MRN', keep='first', inplace=True)\n<mask token>\ndf_2.drop_duplicates('MRN', keep='first', inplace=True)\n<mask token>\ndf.sort_values('COLLECTION_DT', inplace=True)\ndf.variant.fillna(0, inplace=True)\n<mask token>\ndf_data.to_excel('final_Data_' + tag + '_log_growth_6_28_2021.xlsx', index=\n False)\n\n\ndef my_logistic(x, a, b, c):\n return c / (1 + a * np.exp(-b * x))\n\n\n<mask token>\nplt.scatter(x, y)\nplt.plot(x, my_logistic(x, a, b, c))\n<mask token>\nplt.plot(xprime, yprime)\nplt.savefig('log_fit_best_fit' + tag + '.png')\nplt.close()\n<mask token>\nfor i, p, var in zip(range(n), pars, np.diag(pcov)):\n sigma = var ** 0.5\n if i == 1:\n val_dw = p - sigma * tval\n val_up = p + sigma * tval\n print('p{0}: {1} [{2} {3}]'.format(i, p, p - sigma * tval, p + sigma *\n tval))\nplt.plot(x, y, 'bo', markersize=5, label='Observed')\n<mask token>\nplt.plot(xprime, yprime, label='Predicted')\n<mask token>\nplt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label=\n '95% CI')\nplt.title('Logistic growth model [' + tag + ']', fontsize=18)\nplt.xlabel('Days since ' + days_since, fontsize=15)\nplt.ylabel('Percent of patients ', fontsize=15)\nplt.legend()\nplt.savefig('log_pred_best_fit' + tag + '.png')\nplt.close()\n<mask token>\nprint(dt)\n<mask token>\nprint(dt)\n<mask token>\nprint(dt)\n",
"step-3": "<mask token>\nmatplotlib.use('Agg')\n<mask token>\nparams = read_run_params()\nrun = params['current_run']\nout_home = params['container'] + 'output/'\nout_dir = out_home + run + '/'\ndf = pd.read_csv(out_dir +\n '4_mcov_strain_variant_map_covid_pangolin_db_input_' + run + '.csv')\ndf = df[df.quality == 'HQ']\ntag = 'B.1.617.Family'\nvoi = ['B.1.617.2', 'AY.2', 'AY.3']\nstart_date = '4-15-2021'\nend_date = '7-20-2021'\ndays_since = '4/15/2021'\ndays = 180\nkeep_mrns_variant = np.unique(df[df.variant.isin(voi)]['MRN'])\ndf_mrns = df[df.MRN.isin(keep_mrns_variant)]\ndf_mrns = df_mrns[df_mrns.variant.isin(voi)]\ndf_mrns.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns.drop_duplicates('MRN', keep='first', inplace=True)\nkeep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)]['MRN'])\ndf_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]\ndf_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin\n (voi)]\ndf_mrns_not_variant.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns_not_variant.drop_duplicates('MRN', keep='first', inplace=True)\ndf_2 = df_mrns.append(df_mrns_not_variant)\ndf_2.drop_duplicates('MRN', keep='first', inplace=True)\ndf = df_2\ndf = df[['MCoVNumber', 'COLLECTION_DT', 'variant']]\ndf.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)\ndf.COLLECTION_DT = df.COLLECTION_DT.dt.date\ndf = df[(df.COLLECTION_DT >= pd.to_datetime(start_date)) & (df.\n COLLECTION_DT < pd.to_datetime(end_date))]\ndf.sort_values('COLLECTION_DT', inplace=True)\ndf.variant.fillna(0, inplace=True)\ndf.variant = [(1 if x in voi else 0) for x in df.variant]\ndf_variant = df.groupby('COLLECTION_DT')['variant'].agg('sum').reset_index()\ndf_count = df.groupby('COLLECTION_DT')['variant'].agg('count').reset_index()\ndates = pd.date_range(df.COLLECTION_DT.min(), df.COLLECTION_DT.max() +\n timedelta(days=1) - timedelta(days=1), freq='d')\ndf_data = pd.DataFrame(dates)\ndf_data.columns = ['dates']\ndf_data['date_step'] = [x for x in range(1, df_data.shape[0] + 1, 1)]\ndf_data['total'] = df_count.variant\ndf_data['variant'] = df_variant.variant\ndf_data['variant_csum'] = np.cumsum(df_variant.variant.values)\ndf_data['variant_percent'] = [(x / y * 100) for x, y in zip(df_data.variant,\n df_data.total)]\ndf_data.to_excel('final_Data_' + tag + '_log_growth_6_28_2021.xlsx', index=\n False)\n\n\ndef my_logistic(x, a, b, c):\n return c / (1 + a * np.exp(-b * x))\n\n\nx = np.array(df_data.date_step)\ny = np.array(df_data.variant_percent)\npo = np.random.exponential(size=3)\nbounds = 0, [1000.0, 2.0, 100.0]\n(a, b, c), cov = optim.curve_fit(my_logistic, x, y, bounds=bounds, p0=po)\nplt.scatter(x, y)\nplt.plot(x, my_logistic(x, a, b, c))\nxprime = np.array([x for x in range(1, 170, 1)])\nyprime = my_logistic(xprime, a, b, c)\nplt.plot(xprime, yprime)\nplt.savefig('log_fit_best_fit' + tag + '.png')\nplt.close()\n<mask token>\npars, pcov = (a, b, c), cov\nalpha = 0.05\nn = len(y)\np = len(pars)\ndof = max(0, n - p)\ntval = t.ppf(1.0 - alpha / 2.0, dof)\nval_dw = 0\nval_up = 0\nfor i, p, var in zip(range(n), pars, np.diag(pcov)):\n sigma = var ** 0.5\n if i == 1:\n val_dw = p - sigma * tval\n val_up = p + sigma * tval\n print('p{0}: {1} [{2} {3}]'.format(i, p, p - sigma * tval, p + sigma *\n tval))\nplt.plot(x, y, 'bo', markersize=5, label='Observed')\nxprime = np.array([x for x in range(1, days, 1)])\nyprime = my_logistic(xprime, a, b, c)\nplt.plot(xprime, yprime, label='Predicted')\nxpred = np.array([x for x in range(1, days, 1)])\nypred_dw = my_logistic(xpred, pars[0], val_dw, 
pars[2])\nypred_up = my_logistic(xpred, pars[0], val_up, pars[2])\nplt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label=\n '95% CI')\nplt.title('Logistic growth model [' + tag + ']', fontsize=18)\nplt.xlabel('Days since ' + days_since, fontsize=15)\nplt.ylabel('Percent of patients ', fontsize=15)\nplt.legend()\nplt.savefig('log_pred_best_fit' + tag + '.png')\nplt.close()\ngr = b\ndt = 70 / (gr * 100)\nprint(dt)\ngr = val_up\ndt = 70 / (gr * 100)\nprint(dt)\ngr = val_dw\ndt = 70 / (gr * 100)\nprint(dt)\n",
"step-4": "import pandas as pd\nimport numpy as np\nfrom datetime import timedelta\nimport scipy.optimize as optim\nfrom scipy import stats\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom gen_utils.gen_io import read_run_params, log_msg\nparams = read_run_params()\nrun = params['current_run']\nout_home = params['container'] + 'output/'\nout_dir = out_home + run + '/'\ndf = pd.read_csv(out_dir +\n '4_mcov_strain_variant_map_covid_pangolin_db_input_' + run + '.csv')\ndf = df[df.quality == 'HQ']\ntag = 'B.1.617.Family'\nvoi = ['B.1.617.2', 'AY.2', 'AY.3']\nstart_date = '4-15-2021'\nend_date = '7-20-2021'\ndays_since = '4/15/2021'\ndays = 180\nkeep_mrns_variant = np.unique(df[df.variant.isin(voi)]['MRN'])\ndf_mrns = df[df.MRN.isin(keep_mrns_variant)]\ndf_mrns = df_mrns[df_mrns.variant.isin(voi)]\ndf_mrns.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns.drop_duplicates('MRN', keep='first', inplace=True)\nkeep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)]['MRN'])\ndf_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]\ndf_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin\n (voi)]\ndf_mrns_not_variant.sort_values('COLLECTION_DT', inplace=True)\ndf_mrns_not_variant.drop_duplicates('MRN', keep='first', inplace=True)\ndf_2 = df_mrns.append(df_mrns_not_variant)\ndf_2.drop_duplicates('MRN', keep='first', inplace=True)\ndf = df_2\ndf = df[['MCoVNumber', 'COLLECTION_DT', 'variant']]\ndf.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)\ndf.COLLECTION_DT = df.COLLECTION_DT.dt.date\ndf = df[(df.COLLECTION_DT >= pd.to_datetime(start_date)) & (df.\n COLLECTION_DT < pd.to_datetime(end_date))]\ndf.sort_values('COLLECTION_DT', inplace=True)\ndf.variant.fillna(0, inplace=True)\ndf.variant = [(1 if x in voi else 0) for x in df.variant]\ndf_variant = df.groupby('COLLECTION_DT')['variant'].agg('sum').reset_index()\ndf_count = df.groupby('COLLECTION_DT')['variant'].agg('count').reset_index()\ndates = pd.date_range(df.COLLECTION_DT.min(), df.COLLECTION_DT.max() +\n timedelta(days=1) - timedelta(days=1), freq='d')\ndf_data = pd.DataFrame(dates)\ndf_data.columns = ['dates']\ndf_data['date_step'] = [x for x in range(1, df_data.shape[0] + 1, 1)]\ndf_data['total'] = df_count.variant\ndf_data['variant'] = df_variant.variant\ndf_data['variant_csum'] = np.cumsum(df_variant.variant.values)\ndf_data['variant_percent'] = [(x / y * 100) for x, y in zip(df_data.variant,\n df_data.total)]\ndf_data.to_excel('final_Data_' + tag + '_log_growth_6_28_2021.xlsx', index=\n False)\n\n\ndef my_logistic(x, a, b, c):\n return c / (1 + a * np.exp(-b * x))\n\n\nx = np.array(df_data.date_step)\ny = np.array(df_data.variant_percent)\npo = np.random.exponential(size=3)\nbounds = 0, [1000.0, 2.0, 100.0]\n(a, b, c), cov = optim.curve_fit(my_logistic, x, y, bounds=bounds, p0=po)\nplt.scatter(x, y)\nplt.plot(x, my_logistic(x, a, b, c))\nxprime = np.array([x for x in range(1, 170, 1)])\nyprime = my_logistic(xprime, a, b, c)\nplt.plot(xprime, yprime)\nplt.savefig('log_fit_best_fit' + tag + '.png')\nplt.close()\nfrom scipy.stats.distributions import t\npars, pcov = (a, b, c), cov\nalpha = 0.05\nn = len(y)\np = len(pars)\ndof = max(0, n - p)\ntval = t.ppf(1.0 - alpha / 2.0, dof)\nval_dw = 0\nval_up = 0\nfor i, p, var in zip(range(n), pars, np.diag(pcov)):\n sigma = var ** 0.5\n if i == 1:\n val_dw = p - sigma * tval\n val_up = p + sigma * tval\n print('p{0}: {1} [{2} {3}]'.format(i, p, p - sigma * tval, p + sigma *\n tval))\nplt.plot(x, y, 'bo', markersize=5, 
label='Observed')\nxprime = np.array([x for x in range(1, days, 1)])\nyprime = my_logistic(xprime, a, b, c)\nplt.plot(xprime, yprime, label='Predicted')\nxpred = np.array([x for x in range(1, days, 1)])\nypred_dw = my_logistic(xpred, pars[0], val_dw, pars[2])\nypred_up = my_logistic(xpred, pars[0], val_up, pars[2])\nplt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label=\n '95% CI')\nplt.title('Logistic growth model [' + tag + ']', fontsize=18)\nplt.xlabel('Days since ' + days_since, fontsize=15)\nplt.ylabel('Percent of patients ', fontsize=15)\nplt.legend()\nplt.savefig('log_pred_best_fit' + tag + '.png')\nplt.close()\ngr = b\ndt = 70 / (gr * 100)\nprint(dt)\ngr = val_up\ndt = 70 / (gr * 100)\nprint(dt)\ngr = val_dw\ndt = 70 / (gr * 100)\nprint(dt)\n",
"step-5": "import pandas as pd\nimport numpy as np\nfrom datetime import timedelta\nimport scipy.optimize as optim\nfrom scipy import stats\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom gen_utils.gen_io import read_run_params,log_msg\n\n\n\n#############################################\n\nparams = read_run_params()\nrun = params[\"current_run\"]\nout_home = params[\"container\"]+\"output/\" \nout_dir = out_home+run+\"/\"\n\ndf = pd.read_csv(out_dir+\"4_mcov_strain_variant_map_covid_pangolin_db_input_\"+run+\".csv\")\ndf = df[df.quality==\"HQ\"]\n\n\n \n#########################\ntag=\"B.1.617.Family\"\nvoi=[\"B.1.617.2\",\"AY.2\",\"AY.3\"]\nstart_date = \"4-15-2021\"\nend_date = \"7-20-2021\"\ndays_since=\"4/15/2021\"\ndays= 180\n\n# voi=\"P.1\"\n# start_date = \"1-1-2021\"\n# end_date = \"6-20-2021\"\n# days_since=\"1/1/2021\"\n# days= 360\n#################################\n\n\n###take unique patients with variant\nkeep_mrns_variant = np.unique(df[df.variant.isin(voi)][\"MRN\"])\ndf_mrns = df[df.MRN.isin(keep_mrns_variant)]\ndf_mrns = df_mrns[df_mrns.variant.isin(voi)] ###important step--remove non b117 variant \ndf_mrns.sort_values(\"COLLECTION_DT\",inplace=True)\ndf_mrns.drop_duplicates(\"MRN\",keep=\"first\",inplace=True)\n\n\nkeep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)][\"MRN\"])\ndf_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]\ndf_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin(voi)]\ndf_mrns_not_variant.sort_values(\"COLLECTION_DT\",inplace=True)\ndf_mrns_not_variant.drop_duplicates(\"MRN\",keep=\"first\",inplace=True)\n\ndf_2 = df_mrns.append(df_mrns_not_variant)\ndf_2.drop_duplicates(\"MRN\",keep=\"first\",inplace=True)\n\ndf = df_2\n\n\ndf=df[['MCoVNumber','COLLECTION_DT','variant']]\n\n#####################################\n\ndf.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)\ndf.COLLECTION_DT = df.COLLECTION_DT.dt.date\n\n\ndf = df[ ( (df.COLLECTION_DT>=pd.to_datetime(start_date)) &\n (df.COLLECTION_DT<pd.to_datetime(end_date)) \n )\n ]\ndf.sort_values(\"COLLECTION_DT\",inplace=True)\n\ndf.variant.fillna(0,inplace=True)\n#########################\n\ndf.variant = [1 if x in voi else 0 for x in df.variant]\n\n\ndf_variant = df.groupby(\"COLLECTION_DT\")[\"variant\"].agg(\"sum\").reset_index()\ndf_count = df.groupby(\"COLLECTION_DT\")[\"variant\"].agg(\"count\").reset_index()\n\ndates = pd.date_range(df.COLLECTION_DT.min(), (df.COLLECTION_DT.max() + timedelta(days=1) )-timedelta(days=1),freq='d')\ndf_data = pd.DataFrame(dates)\ndf_data.columns=[\"dates\"]\ndf_data[\"date_step\"]= [x for x in range(1,df_data.shape[0]+1,1)]\ndf_data[\"total\"] = df_count.variant\ndf_data[\"variant\"] = df_variant.variant\ndf_data[\"variant_csum\"] = np.cumsum(df_variant.variant.values)\ndf_data[\"variant_percent\"]=[ (x/y)*100 for x,y in zip(df_data.variant,df_data.total)]\ndf_data.to_excel(\"final_Data_\"+tag+\"_log_growth_6_28_2021.xlsx\",index=False)\n\ndef my_logistic(x,a,b,c):\n return c/(1 + a * np.exp(-b*x))\n\nx = np.array(df_data.date_step)\n# y = np.array(df_data.variant_csum)\ny = np.array(df_data.variant_percent)\n\n##########optimize\npo = np.random.exponential(size=3)\nbounds = (0,[1000.,2.0,100.])\n(a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)\n\n# for i in range(1,20,1):\n# try:\n# # po = np.array([250.,0.10,99.])\n# po= np.random.exponential(size=3)\n# bounds = ([0.,0.1,0.],[1000.,float(i),100.])\n# (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)\n# 
print(c)\n# except:\n# print(\"error for \" + str(i))\n\n# po = np.array([250.,0.10,99.])\n# bounds = ([0.,0.1,99.],[1000.,1.0,100.])\n# (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)\n\nplt.scatter(x,y)\nplt.plot(x,my_logistic(x,a,b,c))\nxprime = np.array([x for x in range(1,170,1)])\nyprime = my_logistic(xprime,a,b,c)\nplt.plot(xprime,yprime)\nplt.savefig(\"log_fit_best_fit\"+tag+\".png\")\nplt.close()\n\n\n############################## method 2 using t distribution on error --> perfer this one \n\nfrom scipy.stats.distributions import t\n\npars, pcov = (a,b,c),cov\n\nalpha = 0.05 # 95% confidence interval = 100*(1-alpha)\n\nn = len(y) # number of data points\np = len(pars) # number of parameters\n\ndof = max(0, n - p) # number of degrees of freedom\n\n# student-t value for the dof and confidence level\ntval = t.ppf(1.0-alpha/2., dof) \n\nval_dw = 0\nval_up = 0\nfor i, p,var in zip(range(n), pars, np.diag(pcov)):\n sigma = var**0.5\n \n if i==1:\n val_dw = p - sigma*tval\n val_up = p + sigma*tval\n\n print ('p{0}: {1} [{2} {3}]'.format(i, p,\n p - sigma*tval,\n p + sigma*tval))\n\n\n\nplt.plot(x,y,'bo',markersize=5,label='Observed')\nxprime = np.array([x for x in range(1,days,1)])\nyprime = my_logistic(xprime,a,b,c)\nplt.plot(xprime,yprime,label='Predicted')\n\nxpred = np.array([x for x in range(1,days,1)])\nypred_dw = my_logistic(xpred,pars[0],val_dw,pars[2])\nypred_up = my_logistic(xpred,pars[0],val_up,pars[2])\n\nplt.fill_between(xpred, ypred_up,ypred_dw,color = 'k', alpha = 0.1,label='95% CI')\n\nplt.title(\"Logistic growth model [\"+tag+\"]\",fontsize=18)\nplt.xlabel(\"Days since \"+days_since,fontsize=15)\nplt.ylabel(\"Percent of patients \",fontsize=15)\n\nplt.legend()\nplt.savefig(\"log_pred_best_fit\"+tag+\".png\")\nplt.close()\n\n\ngr=b;dt = 70/(gr*100);print(dt)\ngr=val_up;dt = 70/(gr*100);print(dt)\ngr=val_dw;dt = 70/(gr*100);print(dt)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(suv_data.head(10))
print('the no of passengers in the list is' + str(len(suv_data.index)))
sns.countplot(x='Purchased', data=suv_data)
sns.countplot(x='Purchased', hue='Gender', data=suv_data)
suv_data['Age'].plot.hist()
suv_data.info()
suv_data['EstimatedSalary'].plot.hist(bins=50, figsize=(10, 5))
print(suv_data.isnull())
print(suv_data.isnull().sum())
sns.heatmap(suv_data.isnull(), yticklabels=False, cmap='viridis')
plt.show()
sns.boxplot(x='Gender', y='Age', data=suv_data)
plt.show()
suv_data.drop('User ID', axis=1, inplace=True)
suv_data.columns
suv_data.head(10)
<|reserved_special_token_0|>
print(Gen.head(5))
<|reserved_special_token_0|>
print(suv_data.head(5))
suv_data.drop('Gender', axis=1, inplace=True)
print(suv_data.head(10))
<|reserved_special_token_0|>
logmodel.fit(X_train, y_train)
<|reserved_special_token_0|>
print(predictions)
<|reserved_special_token_0|>
print(classification_report(y_test, predictions))
<|reserved_special_token_0|>
print(confusion_matrix(y_test, predictions))
<|reserved_special_token_0|>
print(accuracy_score(y_test, predictions) * 100)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
suv_data = pd.read_csv('F:/Development/Machine Learning/suv-data/suv_data.csv')
print(suv_data.head(10))
print('the no of passengers in the list is' + str(len(suv_data.index)))
sns.countplot(x='Purchased', data=suv_data)
sns.countplot(x='Purchased', hue='Gender', data=suv_data)
suv_data['Age'].plot.hist()
suv_data.info()
suv_data['EstimatedSalary'].plot.hist(bins=50, figsize=(10, 5))
print(suv_data.isnull())
print(suv_data.isnull().sum())
sns.heatmap(suv_data.isnull(), yticklabels=False, cmap='viridis')
plt.show()
sns.boxplot(x='Gender', y='Age', data=suv_data)
plt.show()
suv_data.drop('User ID', axis=1, inplace=True)
suv_data.columns
suv_data.head(10)
Gen = pd.get_dummies(suv_data['Gender'], drop_first=True)
print(Gen.head(5))
suv_data = pd.concat([suv_data, Gen], axis=1)
print(suv_data.head(5))
suv_data.drop('Gender', axis=1, inplace=True)
print(suv_data.head(10))
X = suv_data.iloc[:, [0, 1, 3]].values
y = suv_data.iloc[:, 2].values
<|reserved_special_token_0|>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=0)
<|reserved_special_token_0|>
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
<|reserved_special_token_0|>
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
print(predictions)
<|reserved_special_token_0|>
print(classification_report(y_test, predictions))
<|reserved_special_token_0|>
print(confusion_matrix(y_test, predictions))
<|reserved_special_token_0|>
print(accuracy_score(y_test, predictions) * 100)
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import math
import seaborn as sns
import numpy as np
suv_data = pd.read_csv('F:/Development/Machine Learning/suv-data/suv_data.csv')
print(suv_data.head(10))
print('the no of passengers in the list is' + str(len(suv_data.index)))
sns.countplot(x='Purchased', data=suv_data)
sns.countplot(x='Purchased', hue='Gender', data=suv_data)
suv_data['Age'].plot.hist()
suv_data.info()
suv_data['EstimatedSalary'].plot.hist(bins=50, figsize=(10, 5))
print(suv_data.isnull())
print(suv_data.isnull().sum())
sns.heatmap(suv_data.isnull(), yticklabels=False, cmap='viridis')
plt.show()
sns.boxplot(x='Gender', y='Age', data=suv_data)
plt.show()
suv_data.drop('User ID', axis=1, inplace=True)
suv_data.columns
suv_data.head(10)
Gen = pd.get_dummies(suv_data['Gender'], drop_first=True)
print(Gen.head(5))
suv_data = pd.concat([suv_data, Gen], axis=1)
print(suv_data.head(5))
suv_data.drop('Gender', axis=1, inplace=True)
print(suv_data.head(10))
X = suv_data.iloc[:, [0, 1, 3]].values
y = suv_data.iloc[:, 2].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,
random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
print(predictions)
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, predictions))
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, predictions) * 100)
<|reserved_special_token_1|>
import pandas as pd
import matplotlib.pyplot as plt
import math
import seaborn as sns
import numpy as np
suv_data=pd.read_csv("F:/Development/Machine Learning/suv-data/suv_data.csv")
print(suv_data.head(10))
print("the no of passengers in the list is"+str(len(suv_data.index)))
sns.countplot(x="Purchased",data=suv_data)
sns.countplot(x="Purchased",hue="Gender",data=suv_data)
suv_data['Age'].plot.hist()
suv_data.info()
suv_data['EstimatedSalary'].plot.hist(bins=50,figsize=(10,5))
print(suv_data.isnull())
print(suv_data.isnull().sum())
sns.heatmap(suv_data.isnull(),yticklabels=False,cmap="viridis")
plt.show()
sns.boxplot(x="Gender",y="Age",data=suv_data)
plt.show()
suv_data.drop("User ID",axis=1,inplace=True)
suv_data.columns
suv_data.head(10)
Gen=pd.get_dummies(suv_data['Gender'],drop_first=True)
print(Gen.head(5))
suv_data=pd.concat([suv_data,Gen],axis=1)
print(suv_data.head(5))
suv_data.drop("Gender",axis=1,inplace=True)
print(suv_data.head(10))
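# After dropping 'User ID' and 'Gender', columns are [Age, EstimatedSalary, Purchased, Male]:
# X takes Age, EstimatedSalary and the Male dummy; y is the Purchased label.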
X=suv_data.iloc[:,[0,1,3]].values
y=suv_data.iloc[:,2].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
from sklearn.linear_model import LogisticRegression
logmodel=LogisticRegression()
logmodel.fit(X_train, y_train)
predictions=logmodel.predict(X_test)
print(predictions)
from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test,predictions))
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test,predictions)*100)
|
flexible
|
{
"blob_id": "c955057d7f8d5289898ecb96a290f5a7d241b787",
"index": 6440,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(suv_data.head(10))\nprint('the no of passengers in the list is' + str(len(suv_data.index)))\nsns.countplot(x='Purchased', data=suv_data)\nsns.countplot(x='Purchased', hue='Gender', data=suv_data)\nsuv_data['Age'].plot.hist()\nsuv_data.info()\nsuv_data['EstimatedSalary'].plot.hist(bins=50, figsize=(10, 5))\nprint(suv_data.isnull())\nprint(suv_data.isnull().sum())\nsns.heatmap(suv_data.isnull(), yticklabels=False, cmap='viridis')\nplt.show()\nsns.boxplot(x='Gender', y='Age', data=suv_data)\nplt.show()\nsuv_data.drop('User ID', axis=1, inplace=True)\nsuv_data.columns\nsuv_data.head(10)\n<mask token>\nprint(Gen.head(5))\n<mask token>\nprint(suv_data.head(5))\nsuv_data.drop('Gender', axis=1, inplace=True)\nprint(suv_data.head(10))\n<mask token>\nlogmodel.fit(X_train, y_train)\n<mask token>\nprint(predictions)\n<mask token>\nprint(classification_report(y_test, predictions))\n<mask token>\nprint(confusion_matrix(y_test, predictions))\n<mask token>\nprint(accuracy_score(y_test, predictions) * 100)\n",
"step-3": "<mask token>\nsuv_data = pd.read_csv('F:/Development/Machine Learning/suv-data/suv_data.csv')\nprint(suv_data.head(10))\nprint('the no of passengers in the list is' + str(len(suv_data.index)))\nsns.countplot(x='Purchased', data=suv_data)\nsns.countplot(x='Purchased', hue='Gender', data=suv_data)\nsuv_data['Age'].plot.hist()\nsuv_data.info()\nsuv_data['EstimatedSalary'].plot.hist(bins=50, figsize=(10, 5))\nprint(suv_data.isnull())\nprint(suv_data.isnull().sum())\nsns.heatmap(suv_data.isnull(), yticklabels=False, cmap='viridis')\nplt.show()\nsns.boxplot(x='Gender', y='Age', data=suv_data)\nplt.show()\nsuv_data.drop('User ID', axis=1, inplace=True)\nsuv_data.columns\nsuv_data.head(10)\nGen = pd.get_dummies(suv_data['Gender'], drop_first=True)\nprint(Gen.head(5))\nsuv_data = pd.concat([suv_data, Gen], axis=1)\nprint(suv_data.head(5))\nsuv_data.drop('Gender', axis=1, inplace=True)\nprint(suv_data.head(10))\nX = suv_data.iloc[:, [0, 1, 3]].values\ny = suv_data.iloc[:, 2].values\n<mask token>\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,\n random_state=0)\n<mask token>\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n<mask token>\nlogmodel = LogisticRegression()\nlogmodel.fit(X_train, y_train)\npredictions = logmodel.predict(X_test)\nprint(predictions)\n<mask token>\nprint(classification_report(y_test, predictions))\n<mask token>\nprint(confusion_matrix(y_test, predictions))\n<mask token>\nprint(accuracy_score(y_test, predictions) * 100)\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport math\nimport seaborn as sns\nimport numpy as np\nsuv_data = pd.read_csv('F:/Development/Machine Learning/suv-data/suv_data.csv')\nprint(suv_data.head(10))\nprint('the no of passengers in the list is' + str(len(suv_data.index)))\nsns.countplot(x='Purchased', data=suv_data)\nsns.countplot(x='Purchased', hue='Gender', data=suv_data)\nsuv_data['Age'].plot.hist()\nsuv_data.info()\nsuv_data['EstimatedSalary'].plot.hist(bins=50, figsize=(10, 5))\nprint(suv_data.isnull())\nprint(suv_data.isnull().sum())\nsns.heatmap(suv_data.isnull(), yticklabels=False, cmap='viridis')\nplt.show()\nsns.boxplot(x='Gender', y='Age', data=suv_data)\nplt.show()\nsuv_data.drop('User ID', axis=1, inplace=True)\nsuv_data.columns\nsuv_data.head(10)\nGen = pd.get_dummies(suv_data['Gender'], drop_first=True)\nprint(Gen.head(5))\nsuv_data = pd.concat([suv_data, Gen], axis=1)\nprint(suv_data.head(5))\nsuv_data.drop('Gender', axis=1, inplace=True)\nprint(suv_data.head(10))\nX = suv_data.iloc[:, [0, 1, 3]].values\ny = suv_data.iloc[:, 2].values\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,\n random_state=0)\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\nfrom sklearn.linear_model import LogisticRegression\nlogmodel = LogisticRegression()\nlogmodel.fit(X_train, y_train)\npredictions = logmodel.predict(X_test)\nprint(predictions)\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test, predictions))\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(y_test, predictions))\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_test, predictions) * 100)\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt \nimport math\nimport seaborn as sns\nimport numpy as np\nsuv_data=pd.read_csv(\"F:/Development/Machine Learning/suv-data/suv_data.csv\")\nprint(suv_data.head(10))\nprint(\"the no of passengers in the list is\"+str(len(suv_data.index)))\nsns.countplot(x=\"Purchased\",data=suv_data)\nsns.countplot(x=\"Purchased\",hue=\"Gender\",data=suv_data)\nsuv_data['Age'].plot.hist()\nsuv_data.info()\nsuv_data['EstimatedSalary'].plot.hist(bins=50,figsize=(10,5))\nprint(suv_data.isnull())\nprint(suv_data.isnull().sum())\nsns.heatmap(suv_data.isnull(),yticklabels=False,cmap=\"viridis\")\nplt.show()\nsns.boxplot(x=\"Gender\",y=\"Age\",data=suv_data)\nplt.show()\nsuv_data.drop(\"User ID\",axis=1,inplace=True)\nsuv_data.columns\nsuv_data.head(10)\nGen=pd.get_dummies(suv_data['Gender'],drop_first=True)\nprint(Gen.head(5))\nsuv_data=pd.concat([suv_data,Gen],axis=1)\nprint(suv_data.head(5))\nsuv_data.drop(\"Gender\",axis=1,inplace=True)\nprint(suv_data.head(10))\nX=suv_data.iloc[:,[0,1,3]].values\ny=suv_data.iloc[:,2].values\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)\nfrom sklearn.preprocessing import StandardScaler\nsc=StandardScaler()\nX_train=sc.fit_transform(X_train)\nX_test=sc.transform(X_test)\nfrom sklearn.linear_model import LogisticRegression\nlogmodel=LogisticRegression()\nlogmodel.fit(X_train, y_train)\npredictions=logmodel.predict(X_test)\nprint(predictions)\nfrom sklearn.metrics import classification_report\nprint(classification_report(y_test,predictions))\nfrom sklearn.metrics import confusion_matrix\nprint(confusion_matrix(y_test,predictions))\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_test,predictions)*100)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='API')
from django.contrib.auth import views as auth_views
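# The site root serves the Swagger schema; login/logout reuse Django's built-in auth views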
urlpatterns = [
path('django-admin/', admin.site.urls),
path('', schema_view),
path('auth/login/', auth_views.LoginView.as_view(template_name='auth/login.html')),
path('auth/logout/', auth_views.LogoutView.as_view()),
path('api/auth/', include('apps.auth.urls')),
path('api/polls/', include('apps.polls.urls')),
]
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls))
] + urlpatterns
|
normal
|
{
"blob_id": "987d6c769a4f593405e889ed2b0e3f9955900406",
"index": 856,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n",
"step-3": "<mask token>\nschema_view = get_swagger_view(title='API')\n<mask token>\nurlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view\n ), path('auth/login/', auth_views.LoginView.as_view(template_name=\n 'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view\n ()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',\n include('apps.polls.urls'))]\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n",
"step-4": "from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\nfrom rest_framework_swagger.views import get_swagger_view\nschema_view = get_swagger_view(title='API')\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [path('django-admin/', admin.site.urls), path('', schema_view\n ), path('auth/login/', auth_views.LoginView.as_view(template_name=\n 'auth/login.html')), path('auth/logout/', auth_views.LogoutView.as_view\n ()), path('api/auth/', include('apps.auth.urls')), path('api/polls/',\n include('apps.polls.urls'))]\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n",
"step-5": "from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf import settings\n\nfrom rest_framework_swagger.views import get_swagger_view\n\nschema_view = get_swagger_view(title='API')\n\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns = [\n path('django-admin/', admin.site.urls),\n path('', schema_view),\n path('auth/login/', auth_views.LoginView.as_view(template_name='auth/login.html')),\n path('auth/logout/', auth_views.LogoutView.as_view()),\n path('api/auth/', include('apps.auth.urls')),\n path('api/polls/', include('apps.polls.urls')),\n]\n\nif settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:\n import debug_toolbar\n urlpatterns = [\n path('__debug__/', include(debug_toolbar.urls))\n ] + urlpatterns\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import csv as csv
import hashlib
def func_hash(parameter):
    hash_object = hashlib.sha384(parameter.encode('utf-8'))  # sha384 needs bytes, not str, on Python 3
table_hash = hash_object.hexdigest()
return table_hash
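# Write a small users.csv with a SHA-384 digest per user and a role column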
def myFunk():
with open('users.csv', 'w') as fp:
a = csv.writer(fp, delimiter=',')
roles = ['inspector', 'admin']
data = [['Userneme', 'hash_password', 'role'],
['Olya', func_hash('Olya'), 'admin'],
['Stas', func_hash('Stas'), 'admin'],
['Dima', func_hash('Dima'), 'admin'],
['Kyrylo', func_hash('Kyrylo'), 'admin'],
['Lubchyk', func_hash('Lubchyk'), 'inspector'],
['Sashko', func_hash('Sashko'),roles],
]
a.writerows(data)
myFunk()
|
normal
|
{
"blob_id": "96d13a883590ca969e997bbb27bcdbee1b24252f",
"index": 2730,
"step-1": "<mask token>\n\n\ndef myFunk():\n with open('users.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n roles = ['inspector', 'admin']\n data = [['Userneme', 'hash_password', 'role'], ['Olya', func_hash(\n 'Olya'), 'admin'], ['Stas', func_hash('Stas'), 'admin'], [\n 'Dima', func_hash('Dima'), 'admin'], ['Kyrylo', func_hash(\n 'Kyrylo'), 'admin'], ['Lubchyk', func_hash('Lubchyk'),\n 'inspector'], ['Sashko', func_hash('Sashko'), roles]]\n a.writerows(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef func_hash(parameter):\n hash_object = hashlib.sha384(parameter)\n table_hash = hash_object.hexdigest()\n return table_hash\n\n\ndef myFunk():\n with open('users.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n roles = ['inspector', 'admin']\n data = [['Userneme', 'hash_password', 'role'], ['Olya', func_hash(\n 'Olya'), 'admin'], ['Stas', func_hash('Stas'), 'admin'], [\n 'Dima', func_hash('Dima'), 'admin'], ['Kyrylo', func_hash(\n 'Kyrylo'), 'admin'], ['Lubchyk', func_hash('Lubchyk'),\n 'inspector'], ['Sashko', func_hash('Sashko'), roles]]\n a.writerows(data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef func_hash(parameter):\n hash_object = hashlib.sha384(parameter)\n table_hash = hash_object.hexdigest()\n return table_hash\n\n\ndef myFunk():\n with open('users.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n roles = ['inspector', 'admin']\n data = [['Userneme', 'hash_password', 'role'], ['Olya', func_hash(\n 'Olya'), 'admin'], ['Stas', func_hash('Stas'), 'admin'], [\n 'Dima', func_hash('Dima'), 'admin'], ['Kyrylo', func_hash(\n 'Kyrylo'), 'admin'], ['Lubchyk', func_hash('Lubchyk'),\n 'inspector'], ['Sashko', func_hash('Sashko'), roles]]\n a.writerows(data)\n\n\nmyFunk()\n",
"step-4": "import csv as csv\nimport hashlib\nfrom sets import Set\n\n\ndef func_hash(parameter):\n hash_object = hashlib.sha384(parameter)\n table_hash = hash_object.hexdigest()\n return table_hash\n\n\ndef myFunk():\n with open('users.csv', 'w') as fp:\n a = csv.writer(fp, delimiter=',')\n roles = ['inspector', 'admin']\n data = [['Userneme', 'hash_password', 'role'], ['Olya', func_hash(\n 'Olya'), 'admin'], ['Stas', func_hash('Stas'), 'admin'], [\n 'Dima', func_hash('Dima'), 'admin'], ['Kyrylo', func_hash(\n 'Kyrylo'), 'admin'], ['Lubchyk', func_hash('Lubchyk'),\n 'inspector'], ['Sashko', func_hash('Sashko'), roles]]\n a.writerows(data)\n\n\nmyFunk()\n",
"step-5": "import csv as csv\nimport hashlib\nfrom sets import Set\n\ndef func_hash(parameter):\n hash_object = hashlib.sha384(parameter)\n table_hash = hash_object.hexdigest()\n return table_hash\n\ndef myFunk():\n\twith open('users.csv', 'w') as fp:\n\t a = csv.writer(fp, delimiter=',')\n\t roles = ['inspector', 'admin']\n\t data = [['Userneme', 'hash_password', 'role'],\n\t ['Olya', func_hash('Olya'), 'admin'],\n\t ['Stas', func_hash('Stas'), 'admin'],\n\t ['Dima', func_hash('Dima'), 'admin'],\n\t ['Kyrylo', func_hash('Kyrylo'), 'admin'],\n\t ['Lubchyk', func_hash('Lubchyk'), 'inspector'],\n\t ['Sashko', func_hash('Sashko'),roles],\n\t ]\n\t a.writerows(data)\n\nmyFunk()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
URL Configuration to test mounting created urls from registries
"""
from django.contrib import admin
from django.urls import include, path
from staticpages.loader import StaticpagesLoader
staticpages_loader = StaticpagesLoader()
urlpatterns = [
path("admin/", admin.site.urls),
# Add base pages urls using the same template
*staticpages_loader.build_urls([
"index",
{
"template_path": "index.html",
"name": "foo",
"extra": "free for use",
},
])
]
# Include another urls map on a sub path
urlpatterns.append(
path("sub/", include("sandbox.staticpages_testapp.sub_urls")),
)
|
normal
|
{
"blob_id": "333914f99face050376e4713ca118f2347e50018",
"index": 989,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns.append(path('sub/', include(\n 'sandbox.staticpages_testapp.sub_urls')))\n",
"step-3": "<mask token>\nstaticpages_loader = StaticpagesLoader()\nurlpatterns = [path('admin/', admin.site.urls), *staticpages_loader.\n build_urls(['index', {'template_path': 'index.html', 'name': 'foo',\n 'extra': 'free for use'}])]\nurlpatterns.append(path('sub/', include(\n 'sandbox.staticpages_testapp.sub_urls')))\n",
"step-4": "<mask token>\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom staticpages.loader import StaticpagesLoader\nstaticpages_loader = StaticpagesLoader()\nurlpatterns = [path('admin/', admin.site.urls), *staticpages_loader.\n build_urls(['index', {'template_path': 'index.html', 'name': 'foo',\n 'extra': 'free for use'}])]\nurlpatterns.append(path('sub/', include(\n 'sandbox.staticpages_testapp.sub_urls')))\n",
"step-5": "\"\"\"\nURL Configuration to test mounting created urls from registries\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nfrom staticpages.loader import StaticpagesLoader\n\n\nstaticpages_loader = StaticpagesLoader()\n\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n # Add base pages urls using the same template\n *staticpages_loader.build_urls([\n \"index\",\n {\n \"template_path\": \"index.html\",\n \"name\": \"foo\",\n \"extra\": \"free for use\",\n },\n ])\n]\n\n# Include another urls map on a sub path\nurlpatterns.append(\n path(\"sub/\", include(\"sandbox.staticpages_testapp.sub_urls\")),\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FilebasedUniqueConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FilebasedUniqueConfig(AppConfig):
name = 'papermerge.filebased_unique'
label = 'filebased_unique'
<|reserved_special_token_1|>
from django.apps import AppConfig
class FilebasedUniqueConfig(AppConfig):
name = 'papermerge.filebased_unique'
label = 'filebased_unique'
|
flexible
|
{
"blob_id": "2d17229afe154937132c1e4f8c138896da34ab61",
"index": 1430,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FilebasedUniqueConfig(AppConfig):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FilebasedUniqueConfig(AppConfig):\n name = 'papermerge.filebased_unique'\n label = 'filebased_unique'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass FilebasedUniqueConfig(AppConfig):\n name = 'papermerge.filebased_unique'\n label = 'filebased_unique'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os, sys
from scrapy.cmdline import execute
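# Make the project directory importable, then launch the 'laptop' spider through Scrapy's CLI entry point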
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute('scrapy crawl laptop'.split())
|
normal
|
{
"blob_id": "71ff8e8a62a3b2731071ed7a039b51c150ebaca4",
"index": 3671,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\nexecute('scrapy crawl laptop'.split())\n",
"step-3": "import os, sys\nfrom scrapy.cmdline import execute\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\nexecute('scrapy crawl laptop'.split())\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def download_image(url: str) ->bool:
img_tag_regex = '<img.*?src="(.*?)"[^\\>]+>'
response = requests.get(url)
if response.status_code != 200:
return False
text = response.text
image_links = re.findall(img_tag_regex, text)
for link in image_links:
resp = requests.get(link)
with open(link.replace('https://', '').replace('http://', ''), 'wb'
) as file:
file.write(resp.content)
return True
<|reserved_special_token_1|>
import re
import requests
def download_image(url: str) ->bool:
img_tag_regex = '<img.*?src="(.*?)"[^\\>]+>'
response = requests.get(url)
if response.status_code != 200:
return False
text = response.text
image_links = re.findall(img_tag_regex, text)
for link in image_links:
resp = requests.get(link)
with open(link.replace('https://', '').replace('http://', ''), 'wb'
) as file:
file.write(resp.content)
return True
<|reserved_special_token_1|>
import re
import requests
def download_image(url: str) -> bool:
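    # Naive regex that pulls the src attribute out of every <img> tag on the page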
img_tag_regex = r"""<img.*?src="(.*?)"[^\>]+>"""
response = requests.get(url)
if response.status_code != 200:
return False
text = response.text
image_links = re.findall(img_tag_regex, text)
for link in image_links:
resp = requests.get(link)
with open(link.replace("https://", "").replace("http://", ""), "wb") as file:
file.write(resp.content)
return True
|
flexible
|
{
"blob_id": "268c36f6fb99383ea02b7ee406189ffb467d246c",
"index": 6554,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef download_image(url: str) ->bool:\n img_tag_regex = '<img.*?src=\"(.*?)\"[^\\\\>]+>'\n response = requests.get(url)\n if response.status_code != 200:\n return False\n text = response.text\n image_links = re.findall(img_tag_regex, text)\n for link in image_links:\n resp = requests.get(link)\n with open(link.replace('https://', '').replace('http://', ''), 'wb'\n ) as file:\n file.write(resp.content)\n return True\n",
"step-3": "import re\nimport requests\n\n\ndef download_image(url: str) ->bool:\n img_tag_regex = '<img.*?src=\"(.*?)\"[^\\\\>]+>'\n response = requests.get(url)\n if response.status_code != 200:\n return False\n text = response.text\n image_links = re.findall(img_tag_regex, text)\n for link in image_links:\n resp = requests.get(link)\n with open(link.replace('https://', '').replace('http://', ''), 'wb'\n ) as file:\n file.write(resp.content)\n return True\n",
"step-4": "import re\n\nimport requests\n\n\ndef download_image(url: str) -> bool:\n img_tag_regex = r\"\"\"<img.*?src=\"(.*?)\"[^\\>]+>\"\"\"\n\n response = requests.get(url)\n if response.status_code != 200:\n return False\n\n text = response.text\n image_links = re.findall(img_tag_regex, text)\n\n for link in image_links:\n resp = requests.get(link)\n with open(link.replace(\"https://\", \"\").replace(\"http://\", \"\"), \"wb\") as file:\n file.write(resp.content)\n\n return True\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
from math import ceil, floor, sqrt
def palindromes(n: int) -> int:
"""yield successive palindromes starting at n"""
# 1 -> 2 -> 3 ... 9 -> 11 -> 22 -> 33 -> 44 .. 99 -> 101
# 101 -> 111 -> 121 -> 131 -> ... -> 191 -> 202 -> 212
# 989 -> 999 -> 1001 -> 1111 -> 1221
# 9889 -> 9999 -> 10001 -> 10101 -> 10201
prev = n
s = str(n)
even = len(s) % 2 == 0
s = s[:ceil(len(s) / 2)]
n = int(s)
while True:
if even:
pal = int(''.join([s, s[-1::-1]])) # join '12' with '21'
else:
pal = int(''.join([s, s[-2::-1]])) # join '12' with '1'
if prev <= pal:
yield pal
n += 1
if all(digit == '9' for digit in s):
even = not even
if even: n //= 10
s = str(n)
def isPrime(n: int) -> bool:
if n < 2:
return False
for i in range(2, floor(sqrt(n)) + 1):
if n % i == 0:
return False
return True
class Solution:
def primePalindrome(self, N: int) -> int:
"""return lowest prime palindrome >= N"""
for p in palindromes(N):
if isPrime(p):
return p
|
normal
|
{
"blob_id": "b07073a7f65dbc10806b68729f21a8bc8773a1ab",
"index": 3836,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def primePalindrome(self, N: int) ->int:\n \"\"\"return lowest prime palindrome >= N\"\"\"\n for p in palindromes(N):\n if isPrime(p):\n return p\n",
"step-2": "<mask token>\n\n\ndef palindromes(n: int) ->int:\n \"\"\"yield successive palindromes starting at n\"\"\"\n prev = n\n s = str(n)\n even = len(s) % 2 == 0\n s = s[:ceil(len(s) / 2)]\n n = int(s)\n while True:\n if even:\n pal = int(''.join([s, s[-1::-1]]))\n else:\n pal = int(''.join([s, s[-2::-1]]))\n if prev <= pal:\n yield pal\n n += 1\n if all(digit == '9' for digit in s):\n even = not even\n if even:\n n //= 10\n s = str(n)\n\n\n<mask token>\n\n\nclass Solution:\n\n def primePalindrome(self, N: int) ->int:\n \"\"\"return lowest prime palindrome >= N\"\"\"\n for p in palindromes(N):\n if isPrime(p):\n return p\n",
"step-3": "<mask token>\n\n\ndef palindromes(n: int) ->int:\n \"\"\"yield successive palindromes starting at n\"\"\"\n prev = n\n s = str(n)\n even = len(s) % 2 == 0\n s = s[:ceil(len(s) / 2)]\n n = int(s)\n while True:\n if even:\n pal = int(''.join([s, s[-1::-1]]))\n else:\n pal = int(''.join([s, s[-2::-1]]))\n if prev <= pal:\n yield pal\n n += 1\n if all(digit == '9' for digit in s):\n even = not even\n if even:\n n //= 10\n s = str(n)\n\n\ndef isPrime(n: int) ->bool:\n if n < 2:\n return False\n for i in range(2, floor(sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n\n\nclass Solution:\n\n def primePalindrome(self, N: int) ->int:\n \"\"\"return lowest prime palindrome >= N\"\"\"\n for p in palindromes(N):\n if isPrime(p):\n return p\n",
"step-4": "from math import ceil, floor, sqrt\n\n\ndef palindromes(n: int) ->int:\n \"\"\"yield successive palindromes starting at n\"\"\"\n prev = n\n s = str(n)\n even = len(s) % 2 == 0\n s = s[:ceil(len(s) / 2)]\n n = int(s)\n while True:\n if even:\n pal = int(''.join([s, s[-1::-1]]))\n else:\n pal = int(''.join([s, s[-2::-1]]))\n if prev <= pal:\n yield pal\n n += 1\n if all(digit == '9' for digit in s):\n even = not even\n if even:\n n //= 10\n s = str(n)\n\n\ndef isPrime(n: int) ->bool:\n if n < 2:\n return False\n for i in range(2, floor(sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n\n\nclass Solution:\n\n def primePalindrome(self, N: int) ->int:\n \"\"\"return lowest prime palindrome >= N\"\"\"\n for p in palindromes(N):\n if isPrime(p):\n return p\n",
"step-5": "#!/usr/bin/env python\n\nfrom math import ceil, floor, sqrt\n\ndef palindromes(n: int) -> int:\n \"\"\"yield successive palindromes starting at n\"\"\"\n # 1 -> 2 -> 3 ... 9 -> 11 -> 22 -> 33 -> 44 .. 99 -> 101\n # 101 -> 111 -> 121 -> 131 -> ... -> 191 -> 202 -> 212\n # 989 -> 999 -> 1001 -> 1111 -> 1221\n # 9889 -> 9999 -> 10001 -> 10101 -> 10201\n prev = n\n s = str(n)\n even = len(s) % 2 == 0\n s = s[:ceil(len(s) / 2)]\n n = int(s)\n while True:\n if even:\n pal = int(''.join([s, s[-1::-1]])) # join '12' with '21'\n else:\n pal = int(''.join([s, s[-2::-1]])) # join '12' with '1'\n if prev <= pal:\n yield pal\n \n n += 1\n if all(digit == '9' for digit in s):\n even = not even\n if even: n //= 10\n s = str(n)\n\ndef isPrime(n: int) -> bool:\n if n < 2:\n return False\n for i in range(2, floor(sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True\n \n\nclass Solution:\n def primePalindrome(self, N: int) -> int:\n \"\"\"return lowest prime palindrome >= N\"\"\"\n for p in palindromes(N):\n if isPrime(p):\n return p\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from .hacker import HackerRegistrationPage
from .judge import JudgeRegistrationPage
from .mentor import MentorRegistrationPage
from .organizer import OrganizerRegistrationPage
from .user import UserRegistrationPage
|
normal
|
{
"blob_id": "34f3212b0254cbcb5e1ca535a29d4fe820dcaad8",
"index": 2978,
"step-1": "<mask token>\n",
"step-2": "from .hacker import HackerRegistrationPage\nfrom .judge import JudgeRegistrationPage\nfrom .mentor import MentorRegistrationPage\nfrom .organizer import OrganizerRegistrationPage\nfrom .user import UserRegistrationPage\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import cv2
import numpy as np
from pycocotools.coco import maskUtils
# from dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata
# from dataset.base_dataflow import Meta
from dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata
from dataset.base_dataflow import Meta
def read_img(components):
"""
Loads image from meta.img_path. Assigns the image to
the field img of the same meta instance.
:param components: components
:return: updated components
"""
img_buf = open(components[0], 'rb').read()
if not img_buf:
raise Exception('image not read, path=%s' % components[0])
    arr = np.frombuffer(img_buf, np.uint8)  # frombuffer: np.fromstring is deprecated for binary input
img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
components[1], components[2] = img.shape[:2]
components[10] = img
return components
def gen_mask(components):
"""
Generate masks based on the coco mask polygons.
:param components: components
:return: updated components
"""
masks_segments = components[7]
hh = components[1]
ww = components[2]
if masks_segments:
mask_miss = np.ones((hh, ww), dtype=np.uint8)
for seg in masks_segments:
bin_mask = maskUtils.decode(seg)
bin_mask = np.logical_not(bin_mask)
mask_miss = np.bitwise_and(mask_miss, bin_mask)
components[11] = mask_miss
return components
# components: the list of fields for one datapoint produced by the dataflow
def augment(components, augmentors,use_o=False):
"""
Augmenting of images.
:param components: components
:return: updated components.
"""
img_path = components[0]
height = components[1]
width = components[2]
center = components[3]
bbox = components[4]
area = components[5]
num_keypoints = components[6]
masks_segments = components[7]
scale = components[8]
all_joints = components[9]
img = components[10]
mask = components[11]
aug_center = components[12]
aug_joints = components[13]
idx = components[14]
meta = Meta(img_path, height, width, center, bbox,
area, scale, num_keypoints)
meta.masks_segments = masks_segments
meta.all_joints = all_joints
meta.img = img
meta.mask = mask
meta.aug_center = aug_center
meta.aug_joints = aug_joints
aug_center = meta.center.copy()
aug_joints = joints_to_point8(meta.all_joints)
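    # Odd-indexed samples get only the single transform at augmentors[4]; even-indexed samples run the full augmentor pipeline below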
if idx % 2 == 1:
# print(f"ori: {idx//2}, {idx}")
o_meta= Meta(img_path, height, width, center, bbox,
area, scale, num_keypoints)
o_meta.all_joints=all_joints
o_meta.img=img
o_meta.mask=mask
o_meta.aug_center=aug_center
o_meta.aug_joints=aug_joints
o_aug_center=o_meta.center.copy()
o_aug_joints=joints_to_point8(o_meta.all_joints)
o_trans=augmentors[4].get_transform(AugImgMetadata(
img=o_meta.img,
mask = o_meta.mask,
center=o_aug_center,
scale=o_meta.scale
))
o_img,o_mask=o_trans.apply_image(o_meta)
o_aug_joints = o_trans.apply_coords(o_aug_joints)
# o_aug_center = o_trans.apply_coords(o_aug_center)
# o_meta.img=o_img
# o_meta.mask=mask
o_meta.aug_joints=point8_to_joints(o_aug_joints)
# o_meta.aug_center=o_aug_center
return [o_img,o_meta.aug_joints]
else:
for aug in augmentors:
transformation = aug.get_transform(
AugImgMetadata(img=meta.img,
mask=meta.mask,
center=aug_center,
scale=meta.scale))
im, mask = transformation.apply_image(meta)
# augment joints
aug_joints = transformation.apply_coords(aug_joints)
            # after flipping horizontally the left side joints and right side joints are also
            # flipped so we need to recover their original orientation.
if isinstance(transformation, FlipTransform):
aug_joints = transformation.recover_left_right(aug_joints)
# augment center position
aug_center = transformation.apply_coords(aug_center)
meta.img = im
meta.mask = mask
meta.aug_joints = point8_to_joints(aug_joints)
meta.aug_center = aug_center
back_img=meta.img
back_aug_joints = meta.aug_joints
# del meta
# return [[back_img,back_aug_joints],
# [o_meta.img,o_meta.aug_joints]]
return [back_img,back_aug_joints]
def apply_mask(components):
"""
    Applies the mask (if it exists) to the image.
:param components: components
:return: updated components
"""
img = components[10]
mask = components[11]
if mask is not None:
img[:, :, 0] = img[:, :, 0] * mask
img[:, :, 1] = img[:, :, 1] * mask
img[:, :, 2] = img[:, :, 2] * mask
img[img == 0] = 128
return components
def create_all_mask(mask, num, stride):
"""
Helper function to create a stack of scaled down mask.
:param mask: mask image
:param num: number of layers
:param stride: parameter used to scale down the mask image because it has
    the same size as the original image. We need the size of the network output.
:return:
"""
scale_factor = 1.0 / stride
small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)
small_mask = small_mask[:, :, np.newaxis]
return np.repeat(small_mask, num, axis=2)
|
normal
|
{
"blob_id": "e47223622a2718830d830dbb779800659d659ae3",
"index": 8472,
"step-1": "<mask token>\n\n\ndef augment(components, augmentors, use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n if idx % 2 == 1:\n o_meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n o_meta.all_joints = all_joints\n o_meta.img = img\n o_meta.mask = mask\n o_meta.aug_center = aug_center\n o_meta.aug_joints = aug_joints\n o_aug_center = o_meta.center.copy()\n o_aug_joints = joints_to_point8(o_meta.all_joints)\n o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,\n mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))\n o_img, o_mask = o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n o_meta.aug_joints = point8_to_joints(o_aug_joints)\n return [o_img, o_meta.aug_joints]\n else:\n for aug in augmentors:\n transformation = aug.get_transform(AugImgMetadata(img=meta.img,\n mask=meta.mask, center=aug_center, scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n aug_joints = transformation.apply_coords(aug_joints)\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n aug_center = transformation.apply_coords(aug_center)\n meta.img = im\n meta.mask = mask\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n back_img = meta.img\n back_aug_joints = meta.aug_joints\n return [back_img, back_aug_joints]\n\n\n<mask token>\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,\n interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-2": "<mask token>\n\n\ndef read_img(components):\n \"\"\"\n Loads image from meta.img_path. Assigns the image to\n the field img of the same meta instance.\n\n :param components: components\n :return: updated components\n \"\"\"\n img_buf = open(components[0], 'rb').read()\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n return components\n\n\ndef gen_mask(components):\n \"\"\"\n Generate masks based on the coco mask polygons.\n\n :param components: components\n :return: updated components\n \"\"\"\n masks_segments = components[7]\n hh = components[1]\n ww = components[2]\n if masks_segments:\n mask_miss = np.ones((hh, ww), dtype=np.uint8)\n for seg in masks_segments:\n bin_mask = maskUtils.decode(seg)\n bin_mask = np.logical_not(bin_mask)\n mask_miss = np.bitwise_and(mask_miss, bin_mask)\n components[11] = mask_miss\n return components\n\n\ndef augment(components, augmentors, use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n if idx % 2 == 1:\n o_meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n o_meta.all_joints = all_joints\n o_meta.img = img\n o_meta.mask = mask\n o_meta.aug_center = aug_center\n o_meta.aug_joints = aug_joints\n o_aug_center = o_meta.center.copy()\n o_aug_joints = joints_to_point8(o_meta.all_joints)\n o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,\n mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))\n o_img, o_mask = o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n o_meta.aug_joints = point8_to_joints(o_aug_joints)\n return [o_img, o_meta.aug_joints]\n else:\n for aug in augmentors:\n transformation = aug.get_transform(AugImgMetadata(img=meta.img,\n mask=meta.mask, center=aug_center, scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n aug_joints = transformation.apply_coords(aug_joints)\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n aug_center = transformation.apply_coords(aug_center)\n meta.img = im\n meta.mask = mask\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n back_img = meta.img\n back_aug_joints = meta.aug_joints\n return [back_img, back_aug_joints]\n\n\n<mask token>\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. 
We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,\n interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-3": "<mask token>\n\n\ndef read_img(components):\n \"\"\"\n Loads image from meta.img_path. Assigns the image to\n the field img of the same meta instance.\n\n :param components: components\n :return: updated components\n \"\"\"\n img_buf = open(components[0], 'rb').read()\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n return components\n\n\ndef gen_mask(components):\n \"\"\"\n Generate masks based on the coco mask polygons.\n\n :param components: components\n :return: updated components\n \"\"\"\n masks_segments = components[7]\n hh = components[1]\n ww = components[2]\n if masks_segments:\n mask_miss = np.ones((hh, ww), dtype=np.uint8)\n for seg in masks_segments:\n bin_mask = maskUtils.decode(seg)\n bin_mask = np.logical_not(bin_mask)\n mask_miss = np.bitwise_and(mask_miss, bin_mask)\n components[11] = mask_miss\n return components\n\n\ndef augment(components, augmentors, use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n if idx % 2 == 1:\n o_meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n o_meta.all_joints = all_joints\n o_meta.img = img\n o_meta.mask = mask\n o_meta.aug_center = aug_center\n o_meta.aug_joints = aug_joints\n o_aug_center = o_meta.center.copy()\n o_aug_joints = joints_to_point8(o_meta.all_joints)\n o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,\n mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))\n o_img, o_mask = o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n o_meta.aug_joints = point8_to_joints(o_aug_joints)\n return [o_img, o_meta.aug_joints]\n else:\n for aug in augmentors:\n transformation = aug.get_transform(AugImgMetadata(img=meta.img,\n mask=meta.mask, center=aug_center, scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n aug_joints = transformation.apply_coords(aug_joints)\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n aug_center = transformation.apply_coords(aug_center)\n meta.img = im\n meta.mask = mask\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n back_img = meta.img\n back_aug_joints = meta.aug_joints\n return [back_img, back_aug_joints]\n\n\ndef apply_mask(components):\n \"\"\"\n Applies the mask (if exists) to the image.\n\n :param components: components\n :return: updated components\n \"\"\"\n img = components[10]\n mask = components[11]\n if mask is not None:\n img[:, :, 0] = img[:, :, 0] * mask\n img[:, :, 1] = img[:, :, 1] * mask\n img[:, :, 2] = img[:, :, 2] * 
mask\n img[img == 0] = 128\n return components\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,\n interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-4": "import cv2\nimport numpy as np\nfrom pycocotools.coco import maskUtils\nfrom dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata\nfrom dataset.base_dataflow import Meta\n\n\ndef read_img(components):\n \"\"\"\n Loads image from meta.img_path. Assigns the image to\n the field img of the same meta instance.\n\n :param components: components\n :return: updated components\n \"\"\"\n img_buf = open(components[0], 'rb').read()\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n return components\n\n\ndef gen_mask(components):\n \"\"\"\n Generate masks based on the coco mask polygons.\n\n :param components: components\n :return: updated components\n \"\"\"\n masks_segments = components[7]\n hh = components[1]\n ww = components[2]\n if masks_segments:\n mask_miss = np.ones((hh, ww), dtype=np.uint8)\n for seg in masks_segments:\n bin_mask = maskUtils.decode(seg)\n bin_mask = np.logical_not(bin_mask)\n mask_miss = np.bitwise_and(mask_miss, bin_mask)\n components[11] = mask_miss\n return components\n\n\ndef augment(components, augmentors, use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n if idx % 2 == 1:\n o_meta = Meta(img_path, height, width, center, bbox, area, scale,\n num_keypoints)\n o_meta.all_joints = all_joints\n o_meta.img = img\n o_meta.mask = mask\n o_meta.aug_center = aug_center\n o_meta.aug_joints = aug_joints\n o_aug_center = o_meta.center.copy()\n o_aug_joints = joints_to_point8(o_meta.all_joints)\n o_trans = augmentors[4].get_transform(AugImgMetadata(img=o_meta.img,\n mask=o_meta.mask, center=o_aug_center, scale=o_meta.scale))\n o_img, o_mask = o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n o_meta.aug_joints = point8_to_joints(o_aug_joints)\n return [o_img, o_meta.aug_joints]\n else:\n for aug in augmentors:\n transformation = aug.get_transform(AugImgMetadata(img=meta.img,\n mask=meta.mask, center=aug_center, scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n aug_joints = transformation.apply_coords(aug_joints)\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n aug_center = transformation.apply_coords(aug_center)\n meta.img = im\n meta.mask = mask\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n back_img = meta.img\n back_aug_joints = meta.aug_joints\n return [back_img, back_aug_joints]\n\n\ndef apply_mask(components):\n \"\"\"\n Applies the mask (if exists) to the image.\n\n :param components: components\n :return: updated 
components\n \"\"\"\n img = components[10]\n mask = components[11]\n if mask is not None:\n img[:, :, 0] = img[:, :, 0] * mask\n img[:, :, 1] = img[:, :, 1] * mask\n img[:, :, 2] = img[:, :, 2] * mask\n img[img == 0] = 128\n return components\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor,\n interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-5": "import cv2\nimport numpy as np\n\nfrom pycocotools.coco import maskUtils\n\n# from dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata\n\n# from dataset.base_dataflow import Meta\n\nfrom dataset.augmentors import FlipTransform, joints_to_point8, point8_to_joints, AugImgMetadata\n\nfrom dataset.base_dataflow import Meta\n\ndef read_img(components):\n \"\"\"\n Loads image from meta.img_path. Assigns the image to\n the field img of the same meta instance.\n\n :param components: components\n :return: updated components\n \"\"\"\n\n img_buf = open(components[0], 'rb').read()\n\n if not img_buf:\n raise Exception('image not read, path=%s' % components[0])\n\n arr = np.fromstring(img_buf, np.uint8)\n img = cv2.imdecode(arr, cv2.IMREAD_COLOR)\n components[1], components[2] = img.shape[:2]\n components[10] = img\n\n return components\n\n\ndef gen_mask(components):\n \"\"\"\n Generate masks based on the coco mask polygons.\n\n :param components: components\n :return: updated components\n \"\"\"\n masks_segments = components[7]\n hh = components[1]\n ww = components[2]\n\n if masks_segments:\n mask_miss = np.ones((hh, ww), dtype=np.uint8)\n for seg in masks_segments:\n bin_mask = maskUtils.decode(seg)\n bin_mask = np.logical_not(bin_mask)\n mask_miss = np.bitwise_and(mask_miss, bin_mask)\n\n components[11] = mask_miss\n\n return components\n\n\n# components == df\n# seems params' type is list\ndef augment(components, augmentors,use_o=False):\n \"\"\"\n Augmenting of images.\n\n :param components: components\n :return: updated components.\n \"\"\"\n \n img_path = components[0]\n height = components[1]\n width = components[2]\n center = components[3]\n bbox = components[4]\n area = components[5]\n num_keypoints = components[6]\n masks_segments = components[7]\n scale = components[8]\n all_joints = components[9]\n img = components[10]\n mask = components[11]\n aug_center = components[12]\n aug_joints = components[13]\n idx = components[14]\n\n meta = Meta(img_path, height, width, center, bbox,\n area, scale, num_keypoints)\n meta.masks_segments = masks_segments\n meta.all_joints = all_joints\n meta.img = img\n meta.mask = mask\n meta.aug_center = aug_center\n meta.aug_joints = aug_joints\n\n aug_center = meta.center.copy()\n aug_joints = joints_to_point8(meta.all_joints)\n\n if idx % 2 == 1:\n # print(f\"ori: {idx//2}, {idx}\")\n o_meta= Meta(img_path, height, width, center, bbox,\n area, scale, num_keypoints)\n o_meta.all_joints=all_joints\n o_meta.img=img\n o_meta.mask=mask\n o_meta.aug_center=aug_center\n o_meta.aug_joints=aug_joints\n \n o_aug_center=o_meta.center.copy()\n o_aug_joints=joints_to_point8(o_meta.all_joints)\n \n o_trans=augmentors[4].get_transform(AugImgMetadata(\n img=o_meta.img,\n mask = o_meta.mask,\n center=o_aug_center,\n scale=o_meta.scale\n ))\n \n o_img,o_mask=o_trans.apply_image(o_meta)\n o_aug_joints = o_trans.apply_coords(o_aug_joints)\n # o_aug_center = o_trans.apply_coords(o_aug_center)\n # o_meta.img=o_img\n # o_meta.mask=mask\n o_meta.aug_joints=point8_to_joints(o_aug_joints)\n # o_meta.aug_center=o_aug_center\n return [o_img,o_meta.aug_joints]\n \n else:\n\n for aug in augmentors:\n transformation = aug.get_transform(\n AugImgMetadata(img=meta.img,\n mask=meta.mask,\n center=aug_center,\n scale=meta.scale))\n im, mask = transformation.apply_image(meta)\n\n # augment joints\n aug_joints = transformation.apply_coords(aug_joints)\n\n # after flipping horizontaly the left side joints and right side joints are also\n # 
flipped so we need to recover their orginal orientation.\n if isinstance(transformation, FlipTransform):\n aug_joints = transformation.recover_left_right(aug_joints)\n\n # augment center position\n aug_center = transformation.apply_coords(aug_center)\n\n meta.img = im\n meta.mask = mask\n\n meta.aug_joints = point8_to_joints(aug_joints)\n meta.aug_center = aug_center\n\n back_img=meta.img\n back_aug_joints = meta.aug_joints\n # del meta\n\n # return [[back_img,back_aug_joints],\n # [o_meta.img,o_meta.aug_joints]]\n\n return [back_img,back_aug_joints]\n\n\ndef apply_mask(components):\n \"\"\"\n Applies the mask (if exists) to the image.\n\n :param components: components\n :return: updated components\n \"\"\"\n img = components[10]\n mask = components[11]\n if mask is not None:\n img[:, :, 0] = img[:, :, 0] * mask\n img[:, :, 1] = img[:, :, 1] * mask\n img[:, :, 2] = img[:, :, 2] * mask\n img[img == 0] = 128\n return components\n\n\ndef create_all_mask(mask, num, stride):\n \"\"\"\n Helper function to create a stack of scaled down mask.\n\n :param mask: mask image\n :param num: number of layers\n :param stride: parameter used to scale down the mask image because it has\n the same size as orginal image. We need the size of network output.\n :return:\n \"\"\"\n scale_factor = 1.0 / stride\n small_mask = cv2.resize(mask, (0, 0), fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_CUBIC)\n small_mask = small_mask[:, :, np.newaxis]\n return np.repeat(small_mask, num, axis=2)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from django.test import TestCase
from ..models import FearConditioningData, FearConditioningModule
from ..registry import DataViewsetRegistry, ModuleRegistry
class ModuleRegistryTest(TestCase):
def test_register_module_create_view(self) -> None:
registry = ModuleRegistry()
registry.register(FearConditioningModule)
self.assertEqual(
registry.urls[0].pattern._route,
"projects/<int:project_pk>/experiments/<int:experiment_pk>/modules/"
"fear-conditioning/add/",
)
self.assertEqual(
registry.urls[0].callback, registry.views["fear_conditioning_create"]
)
self.assertEqual(registry.urls[0].name, "fear_conditioning_create")
self.assertEqual(registry.modules, [FearConditioningModule])
class DataViewsetRegistryTest(TestCase):
def test_register_data_model(self) -> None:
registry = DataViewsetRegistry()
registry.register(FearConditioningData)
self.assertEqual(registry.data_models, [FearConditioningData])
# List view
self.assertEqual(
registry.urls[0].pattern._route,
"projects/<int:project_pk>/experiments/<int:experiment_pk>/data/"
"fear-conditioning/",
)
self.assertEqual(
registry.urls[0].callback, registry.views["fear_conditioning_data_list"]
)
self.assertEqual(registry.urls[0].name, "fear_conditioning_data_list")
# Detail view
self.assertEqual(
registry.urls[1].pattern._route,
"projects/<int:project_pk>/experiments/<int:experiment_pk>/data/"
"fear-conditioning/<int:data_pk>/",
)
self.assertEqual(
registry.urls[1].callback, registry.views["fear_conditioning_data_detail"]
)
self.assertEqual(registry.urls[1].name, "fear_conditioning_data_detail")
|
normal
|
{
"blob_id": "14cc048f517efd3dad9960f35fff66a78f68fb45",
"index": 8975,
"step-1": "<mask token>\n\n\nclass DataViewsetRegistryTest(TestCase):\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DataViewsetRegistryTest(TestCase):\n\n def test_register_data_model(self) ->None:\n registry = DataViewsetRegistry()\n registry.register(FearConditioningData)\n self.assertEqual(registry.data_models, [FearConditioningData])\n self.assertEqual(registry.urls[0].pattern._route,\n 'projects/<int:project_pk>/experiments/<int:experiment_pk>/data/fear-conditioning/'\n )\n self.assertEqual(registry.urls[0].callback, registry.views[\n 'fear_conditioning_data_list'])\n self.assertEqual(registry.urls[0].name, 'fear_conditioning_data_list')\n self.assertEqual(registry.urls[1].pattern._route,\n 'projects/<int:project_pk>/experiments/<int:experiment_pk>/data/fear-conditioning/<int:data_pk>/'\n )\n self.assertEqual(registry.urls[1].callback, registry.views[\n 'fear_conditioning_data_detail'])\n self.assertEqual(registry.urls[1].name, 'fear_conditioning_data_detail'\n )\n",
"step-3": "<mask token>\n\n\nclass ModuleRegistryTest(TestCase):\n\n def test_register_module_create_view(self) ->None:\n registry = ModuleRegistry()\n registry.register(FearConditioningModule)\n self.assertEqual(registry.urls[0].pattern._route,\n 'projects/<int:project_pk>/experiments/<int:experiment_pk>/modules/fear-conditioning/add/'\n )\n self.assertEqual(registry.urls[0].callback, registry.views[\n 'fear_conditioning_create'])\n self.assertEqual(registry.urls[0].name, 'fear_conditioning_create')\n self.assertEqual(registry.modules, [FearConditioningModule])\n\n\nclass DataViewsetRegistryTest(TestCase):\n\n def test_register_data_model(self) ->None:\n registry = DataViewsetRegistry()\n registry.register(FearConditioningData)\n self.assertEqual(registry.data_models, [FearConditioningData])\n self.assertEqual(registry.urls[0].pattern._route,\n 'projects/<int:project_pk>/experiments/<int:experiment_pk>/data/fear-conditioning/'\n )\n self.assertEqual(registry.urls[0].callback, registry.views[\n 'fear_conditioning_data_list'])\n self.assertEqual(registry.urls[0].name, 'fear_conditioning_data_list')\n self.assertEqual(registry.urls[1].pattern._route,\n 'projects/<int:project_pk>/experiments/<int:experiment_pk>/data/fear-conditioning/<int:data_pk>/'\n )\n self.assertEqual(registry.urls[1].callback, registry.views[\n 'fear_conditioning_data_detail'])\n self.assertEqual(registry.urls[1].name, 'fear_conditioning_data_detail'\n )\n",
"step-4": "from django.test import TestCase\nfrom ..models import FearConditioningData, FearConditioningModule\nfrom ..registry import DataViewsetRegistry, ModuleRegistry\n\n\nclass ModuleRegistryTest(TestCase):\n\n def test_register_module_create_view(self) ->None:\n registry = ModuleRegistry()\n registry.register(FearConditioningModule)\n self.assertEqual(registry.urls[0].pattern._route,\n 'projects/<int:project_pk>/experiments/<int:experiment_pk>/modules/fear-conditioning/add/'\n )\n self.assertEqual(registry.urls[0].callback, registry.views[\n 'fear_conditioning_create'])\n self.assertEqual(registry.urls[0].name, 'fear_conditioning_create')\n self.assertEqual(registry.modules, [FearConditioningModule])\n\n\nclass DataViewsetRegistryTest(TestCase):\n\n def test_register_data_model(self) ->None:\n registry = DataViewsetRegistry()\n registry.register(FearConditioningData)\n self.assertEqual(registry.data_models, [FearConditioningData])\n self.assertEqual(registry.urls[0].pattern._route,\n 'projects/<int:project_pk>/experiments/<int:experiment_pk>/data/fear-conditioning/'\n )\n self.assertEqual(registry.urls[0].callback, registry.views[\n 'fear_conditioning_data_list'])\n self.assertEqual(registry.urls[0].name, 'fear_conditioning_data_list')\n self.assertEqual(registry.urls[1].pattern._route,\n 'projects/<int:project_pk>/experiments/<int:experiment_pk>/data/fear-conditioning/<int:data_pk>/'\n )\n self.assertEqual(registry.urls[1].callback, registry.views[\n 'fear_conditioning_data_detail'])\n self.assertEqual(registry.urls[1].name, 'fear_conditioning_data_detail'\n )\n",
"step-5": "from django.test import TestCase\n\nfrom ..models import FearConditioningData, FearConditioningModule\nfrom ..registry import DataViewsetRegistry, ModuleRegistry\n\n\nclass ModuleRegistryTest(TestCase):\n def test_register_module_create_view(self) -> None:\n registry = ModuleRegistry()\n\n registry.register(FearConditioningModule)\n\n self.assertEqual(\n registry.urls[0].pattern._route,\n \"projects/<int:project_pk>/experiments/<int:experiment_pk>/modules/\"\n \"fear-conditioning/add/\",\n )\n self.assertEqual(\n registry.urls[0].callback, registry.views[\"fear_conditioning_create\"]\n )\n self.assertEqual(registry.urls[0].name, \"fear_conditioning_create\")\n self.assertEqual(registry.modules, [FearConditioningModule])\n\n\nclass DataViewsetRegistryTest(TestCase):\n def test_register_data_model(self) -> None:\n registry = DataViewsetRegistry()\n\n registry.register(FearConditioningData)\n\n self.assertEqual(registry.data_models, [FearConditioningData])\n\n # List view\n self.assertEqual(\n registry.urls[0].pattern._route,\n \"projects/<int:project_pk>/experiments/<int:experiment_pk>/data/\"\n \"fear-conditioning/\",\n )\n self.assertEqual(\n registry.urls[0].callback, registry.views[\"fear_conditioning_data_list\"]\n )\n self.assertEqual(registry.urls[0].name, \"fear_conditioning_data_list\")\n\n # Detail view\n self.assertEqual(\n registry.urls[1].pattern._route,\n \"projects/<int:project_pk>/experiments/<int:experiment_pk>/data/\"\n \"fear-conditioning/<int:data_pk>/\",\n )\n self.assertEqual(\n registry.urls[1].callback, registry.views[\"fear_conditioning_data_detail\"]\n )\n self.assertEqual(registry.urls[1].name, \"fear_conditioning_data_detail\")\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def truecase_is(string):
""" -> lower/title/upper/other """
if string.islower():
return 'l'
if string.istitle():
return 't'
if string.isupper():
return 'u'
return 'o'
<|reserved_special_token_0|>
def truecase_matching_is(str1, str2):
""" -> f(ull-string)/s(ub-string)/n(one) """
if str1 == str2:
return 'f'
if str1 in str2:
return 's'
return 'n'
def lowercase_matching_is(str1, str2):
return truecase_matching_is(str1.lower(), str2.lower())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def truecase_is(string):
""" -> lower/title/upper/other """
if string.islower():
return 'l'
if string.istitle():
return 't'
if string.isupper():
return 'u'
return 'o'
def alnum_is(string):
""" -> alpha/digit/other """
if string.isalpha():
return 'a'
if string.isdigit():
return 'd'
return 'o'
def truecase_matching_is(str1, str2):
""" -> f(ull-string)/s(ub-string)/n(one) """
if str1 == str2:
return 'f'
if str1 in str2:
return 's'
return 'n'
def lowercase_matching_is(str1, str2):
return truecase_matching_is(str1.lower(), str2.lower())
<|reserved_special_token_1|>
from __future__ import print_function, with_statement
<|reserved_special_token_0|>
def truecase_is(string):
""" -> lower/title/upper/other """
if string.islower():
return 'l'
if string.istitle():
return 't'
if string.isupper():
return 'u'
return 'o'
def alnum_is(string):
""" -> alpha/digit/other """
if string.isalpha():
return 'a'
if string.isdigit():
return 'd'
return 'o'
def truecase_matching_is(str1, str2):
""" -> f(ull-string)/s(ub-string)/n(one) """
if str1 == str2:
return 'f'
if str1 in str2:
return 's'
return 'n'
def lowercase_matching_is(str1, str2):
return truecase_matching_is(str1.lower(), str2.lower())
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, with_statement
"""
cosi299a- Cinderella
[email protected]
"""
def truecase_is(string):
""" -> lower/title/upper/other """
if string.islower():
return 'l'
if string.istitle():
return 't'
if string.isupper():
return 'u'
return 'o'
def alnum_is(string):
""" -> alpha/digit/other """ #assumption: only alnum strings analyzed
if string.isalpha():
return 'a'
if string.isdigit():
return 'd'
return 'o'
def truecase_matching_is(str1, str2):
""" -> f(ull-string)/s(ub-string)/n(one) """
if str1==str2:
return 'f'
if str1 in str2:
return 's'
return 'n'
def lowercase_matching_is(str1, str2):
return truecase_matching_is(str1.lower(),str2.lower())
|
flexible
|
{
"blob_id": "75ddcdd4e80b962198ff9de1d996837927c3ac1a",
"index": 824,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef truecase_is(string):\n \"\"\" -> lower/title/upper/other \"\"\"\n if string.islower():\n return 'l'\n if string.istitle():\n return 't'\n if string.isupper():\n return 'u'\n return 'o'\n\n\n<mask token>\n\n\ndef truecase_matching_is(str1, str2):\n \"\"\" -> f(ull-string)/s(ub-string)/n(one) \"\"\"\n if str1 == str2:\n return 'f'\n if str1 in str2:\n return 's'\n return 'n'\n\n\ndef lowercase_matching_is(str1, str2):\n return truecase_matching_is(str1.lower(), str2.lower())\n",
"step-3": "<mask token>\n\n\ndef truecase_is(string):\n \"\"\" -> lower/title/upper/other \"\"\"\n if string.islower():\n return 'l'\n if string.istitle():\n return 't'\n if string.isupper():\n return 'u'\n return 'o'\n\n\ndef alnum_is(string):\n \"\"\" -> alpha/digit/other \"\"\"\n if string.isalpha():\n return 'a'\n if string.isdigit():\n return 'd'\n return 'o'\n\n\ndef truecase_matching_is(str1, str2):\n \"\"\" -> f(ull-string)/s(ub-string)/n(one) \"\"\"\n if str1 == str2:\n return 'f'\n if str1 in str2:\n return 's'\n return 'n'\n\n\ndef lowercase_matching_is(str1, str2):\n return truecase_matching_is(str1.lower(), str2.lower())\n",
"step-4": "from __future__ import print_function, with_statement\n<mask token>\n\n\ndef truecase_is(string):\n \"\"\" -> lower/title/upper/other \"\"\"\n if string.islower():\n return 'l'\n if string.istitle():\n return 't'\n if string.isupper():\n return 'u'\n return 'o'\n\n\ndef alnum_is(string):\n \"\"\" -> alpha/digit/other \"\"\"\n if string.isalpha():\n return 'a'\n if string.isdigit():\n return 'd'\n return 'o'\n\n\ndef truecase_matching_is(str1, str2):\n \"\"\" -> f(ull-string)/s(ub-string)/n(one) \"\"\"\n if str1 == str2:\n return 'f'\n if str1 in str2:\n return 's'\n return 'n'\n\n\ndef lowercase_matching_is(str1, str2):\n return truecase_matching_is(str1.lower(), str2.lower())\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom __future__ import print_function, with_statement\n\n\n\"\"\"\ncosi299a- Cinderella\[email protected]\n\"\"\"\n\ndef truecase_is(string):\n \"\"\" -> lower/title/upper/other \"\"\"\n if string.islower():\n return 'l'\n if string.istitle():\n return 't'\n if string.isupper():\n return 'u'\n return 'o'\n\ndef alnum_is(string):\n \"\"\" -> alpha/digit/other \"\"\" #assumption: only alnum strings analyzed\n if string.isalpha():\n return 'a'\n if string.isdigit():\n return 'd'\n return 'o'\n\ndef truecase_matching_is(str1, str2):\n \"\"\" -> f(ull-string)/s(ub-string)/n(one) \"\"\"\n if str1==str2:\n return 'f'\n if str1 in str2:\n return 's'\n return 'n'\n\ndef lowercase_matching_is(str1, str2):\n return truecase_matching_is(str1.lower(),str2.lower())\n",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('',views.index,name='index'),
path('sign',views.sign,name='sign'),
# path('password_reset/',auth_views.PasswordResetView.as_view(),name='password_reset'),
# path('password_reset/done/',auth_views.PasswordResetDoneView.as_view(),name='password_reset_done'),
# path('reset/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(),name='password_reset_confirm'),
# path('reset/done/',auth_views.PasswordResetCompleteView.as_view(),name='password_reset_complete'),
# path(
# 'change-password',
# auth_views.PasswordChangeView.as_view(
# template_name='common/change-password.html',
# success_url='/'
# ),
# name='change-password'
# ),
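    # Password reset flow: request form -> "email sent" page -> confirm link -> completion page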
path('reset_password/',
auth_views.PasswordResetView.as_view(template_name="password_reset.html"),
name="password_reset" ),
path('reset_password_sent/',
auth_views.PasswordResetDoneView.as_view(template_name="password_reset_sent.html"),
name='password_reset_done'),
path('reset/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(template_name="password_reset_form.html"),
name='password_reset_confirm'),
path('reset_password_complete/',
auth_views.PasswordResetCompleteView.as_view(template_name="password_reset_done.html"),
name='password_reset_complete'),
]
|
normal
|
{
"blob_id": "7e35c35c8ef443155c45bdbff4ce9ad07b99f144",
"index": 9983,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.index, name='index'), path('sign', views.sign,\n name='sign'), path('reset_password/', auth_views.PasswordResetView.\n as_view(template_name='password_reset.html'), name='password_reset'),\n path('reset_password_sent/', auth_views.PasswordResetDoneView.as_view(\n template_name='password_reset_sent.html'), name='password_reset_done'),\n path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.\n as_view(template_name='password_reset_form.html'), name=\n 'password_reset_confirm'), path('reset_password_complete/', auth_views.\n PasswordResetCompleteView.as_view(template_name=\n 'password_reset_done.html'), name='password_reset_complete')]\n",
"step-3": "from django.urls import path\nfrom . import views\nfrom django.contrib.auth import views as auth_views\nurlpatterns = [path('', views.index, name='index'), path('sign', views.sign,\n name='sign'), path('reset_password/', auth_views.PasswordResetView.\n as_view(template_name='password_reset.html'), name='password_reset'),\n path('reset_password_sent/', auth_views.PasswordResetDoneView.as_view(\n template_name='password_reset_sent.html'), name='password_reset_done'),\n path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.\n as_view(template_name='password_reset_form.html'), name=\n 'password_reset_confirm'), path('reset_password_complete/', auth_views.\n PasswordResetCompleteView.as_view(template_name=\n 'password_reset_done.html'), name='password_reset_complete')]\n",
"step-4": "from django.urls import path\nfrom . import views\nfrom django.contrib.auth import views as auth_views \n\nurlpatterns = [\n path('',views.index,name='index'),\n path('sign',views.sign,name='sign'),\n # path('password_reset/',auth_views.PasswordResetView.as_view(),name='password_reset'),\n # path('password_reset/done/',auth_views.PasswordResetDoneView.as_view(),name='password_reset_done'),\n # path('reset/<uidb64>/<token>/',auth_views.PasswordResetConfirmView.as_view(),name='password_reset_confirm'),\n # path('reset/done/',auth_views.PasswordResetCompleteView.as_view(),name='password_reset_complete'),\n\n # path(\n # 'change-password',\n # auth_views.PasswordChangeView.as_view(\n # template_name='common/change-password.html',\n # success_url='/'\n # ),\n # name='change-password'\n # ),\n\n path('reset_password/',\n auth_views.PasswordResetView.as_view(template_name=\"password_reset.html\"),\n name=\"password_reset\" ),\n \n path('reset_password_sent/',\n auth_views.PasswordResetDoneView.as_view(template_name=\"password_reset_sent.html\"),\n name='password_reset_done'),\n\n path('reset/<uidb64>/<token>/',\n auth_views.PasswordResetConfirmView.as_view(template_name=\"password_reset_form.html\"),\n name='password_reset_confirm'),\n \n path('reset_password_complete/',\n auth_views.PasswordResetCompleteView.as_view(template_name=\"password_reset_done.html\"),\n name='password_reset_complete'),\n\n\n \n\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# from django.shortcuts import render
# from django.http import HttpResponse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.views import generic
from django.urls import reverse_lazy
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_exempt
from django.http import Http404, JsonResponse
import json
from . import models
from django.utils import timezone
from questions.forms import UserRegistrationForm, UserLoginForm, UserSettingsForm, AskForm, AnswerForm, UserForm
# from .models import Post
# Create your views here.
def index(request):
return render(request, 'new_questions.html', {
'title': 'Вопросы',
'questions': paginate(request, models.Question.objects.all()),
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
'page_objects' : paginate(request, models.Question.objects.all()),
})
def top(request):
return render(request, 'new_questions.html', {
'title': 'Топ вопросов',
'questions': paginate(request, models.Question.objects.get_hot()),
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
'page_objects' : paginate(request, models.Question.objects.get_hot()),
})
def new(request):
return render(request, 'new_questions.html', {
'title': 'Новые',
'questions': paginate(request, models.Question.objects.get_new()),
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
'page_objects' : paginate(request, models.Question.objects.get_new()),
})
def hot(request, id=1):
"""docstring for Main_menu"""
return render(request, "hot.html", {
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
"questions" : paginate(request, objects_list = models.Question.objects.get_hot()),
"page_objects" : paginate(request, objects_list = models.Question.objects.get_hot()),
})
def profile(request, id):
return render(request, "user_settings.html", {
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
"profile": get_object_or_404(models.CustomUser, pk=id),
})
def user_questions(request, id):  # TODO: redo the page layout, it does not look good
"""docstring for Main_menu"""
return render(request, "user_question.html", {
'questions': paginate(request, models.Question.objects.get_by_user(user_id=id)),
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
'page_objects' : paginate(request, models.Question.objects.get_by_user(user_id=id)),
})
def question_page(request, id):
return render(request, "questions.html", {
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
"question": get_object_or_404(models.Question, pk=id) ,
"answers": paginate(request, objects_list = models.Answer.objects.get_hot_for_answer(id)),
"page_objects": paginate(request, objects_list = models.Answer.objects.get_hot_for_answer(id)),
})
def tag(request, id):
return render(request, 'tag_find.html', {
'users' : paginate(request, models.CustomUser.objects.by_rating())[0:10],
'tags' : paginate(request, models.Tag.objects.hottest())[0:10],
'tag' : get_object_or_404(models.Tag, pk=id) ,
'questions': paginate(request, models.Question.objects.get_by_tag(tag_id=id)),
"page_objects": paginate(request, objects_list = models.Question.objects.get_by_tag(tag_id=id)),
})
def edit(request):
user = get_object_or_404(models.CustomUser, username=request.user)
if request.method == 'POST':
form = UserSettingsForm(instance=user,
data=request.POST,
files=request.FILES
)
if form.is_valid():
form.save()
return profile(request, user.id)
else:
form = UserSettingsForm(instance=user)
return render(request, 'edit.html', {
'form': form,
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
})
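
# Adding a new answer or asking a question requires login; unauthenticated users are redirected to /log_in/.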
@login_required(login_url='/log_in/')
def new_answer(request, id):
if models.Question.objects.filter(id=id).exists():
if request.method == 'POST':
form = AnswerForm(request.POST)
if form.is_valid():
#answeredQuestion = Question.objects.get_by_id(id)[0]
answeredQuestion = get_object_or_404(models.Question, pk=id)
answer = models.Answer.objects.create(author=request.user,
create_date=timezone.now(),
text=form.cleaned_data['text'],
question_id=answeredQuestion.id)
answer.save()
return redirect('/question/{}/add_answer/'.format(id))
else:
form = AnswerForm()
#return render(request, 'question/new_answer.html', {'form': form})
return render(request, 'questions.html', {
'form': form,
'question': get_object_or_404(models.Question, pk=id),
'answers' : paginate(request, models.Answer.objects.get_hot_for_answer(id)),
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
'page_objects' : paginate(request, models.Answer.objects.get_hot_for_answer(id)),
})
else:
raise Http404
@login_required(login_url='/log_in/')
def ask(request):
error = True
if request.method == 'POST':
firstly = False
form = AskForm(request.POST)
if form.is_valid():
ques = models.Question.objects.create(author=request.user,
create_date=timezone.now(),
is_active=True,
title=form.cleaned_data['title'],
text=form.cleaned_data['text'])
ques.save()
for tagTitle in form.cleaned_data['tags'].split():
tag = models.Tag.objects.get_or_create(title=tagTitle)[0]
ques.tags.add(tag)
ques.save()
#return question(request, ques.id)
return redirect('/question/{}/'.format(ques.id))
else:
error = False
else:
form = AskForm()
firstly = True
return render(request, 'new_ask.html', {
'firstly': firstly,
'error': error,
'form': form,
'tags' : paginate(request, models.Tag.objects.hottest())[:10],
'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],
})
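
# The 'next' query parameter records the page to return to after a successful login.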
def signin(request):
last_page = request.GET['next']
if last_page == '/logout' or last_page == '/login':
last_page = '/'
error = False
if request.method == 'POST':
user = authenticate(username=request.POST['nickname'], password=request.POST['password'])
if user is not None:
            login(request, user)  # log the user in
return redirect(last_page)
else:
error = True
return render(request, 'login.html',
{'error': error,
'last_page': last_page,
'tags' : paginate(request, models.Tag.objects.hottest()),
'users' : paginate(request, models.CustomUser.objects.by_rating()),
})
def registration(request):
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST, request.FILES)
print(user_form)
if user_form.is_valid():
user = user_form.save()
user.set_password(user.password)
user.save()
login(request, user)
return redirect(request.GET.get('next') if request.GET.get('next') != '' else '/')
else:
print(user_form.errors)
else:
user_form = UserRegistrationForm()
return render(request,'registration.html',
{'form':user_form,})
def signout(request):
if not request.user.is_authenticated:
raise Http404
logout(request)
#return redirect(request.GET['from'])
return redirect('/')
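
# Helper: paginate objects_list into pages of 30 items; a non-integer page falls back to the first page, an out-of-range page to the last.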
def paginate(request, objects_list):
paginator = Paginator(objects_list, 30)
page = request.GET.get('page')
try:
objects = paginator.page(page)
except PageNotAnInteger:
objects = paginator.page(1)
except EmptyPage:
objects = paginator.page(paginator.num_pages)
return objects
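
# AJAX endpoints below adjust the rating of a question/answer or toggle answer approval; they accept POST requests only.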
@require_POST
def like_question(request):
question_id = request.POST.get('question_id', '')
like_type = request.POST.get('like_type', '')
    question = get_object_or_404(models.Question, pk=question_id)
if not question:
return JsonResponse({"status": "error"})
if (like_type == 'like'):
question.rating += 1
elif (like_type == 'dislike'):
question.rating -= 1
question.save()
return JsonResponse({"status": "ok"})
@require_POST
def like_answer(request):
answer_id = request.POST.get('answer_id', '')
like_type = request.POST.get('like_type', '')
    answer = get_object_or_404(models.Answer, pk=answer_id)
if not answer:
return JsonResponse({"status": "error"})
if (like_type == 'like'):
answer.rating += 1
elif (like_type == 'dislike'):
answer.rating -= 1
answer.save()
return JsonResponse({"status": "ok"})
@require_POST
def approve_answer(request):
answer_id = request.POST.get('answer_id', '')
    answer = get_object_or_404(models.Answer, pk=answer_id)
if not answer:
return JsonResponse({"status": "error"})
answer.approved = not answer.approved
answer.save()
return JsonResponse({"status": "ok"})
|
normal
|
{
"blob_id": "c4b4585501319fd8a8106c91751bb1408912827a",
"index": 3180,
"step-1": "<mask token>\n\n\ndef top(request):\n return render(request, 'new_questions.html', {'title': 'Топ вопросов',\n 'questions': paginate(request, models.Question.objects.get_hot()),\n 'tags': paginate(request, models.Tag.objects.hottest())[:10],\n 'users': paginate(request, models.CustomUser.objects.by_rating())[:\n 10], 'page_objects': paginate(request, models.Question.objects.\n get_hot())})\n\n\n<mask token>\n\n\ndef hot(request, id=1):\n \"\"\"docstring for Main_menu\"\"\"\n return render(request, 'hot.html', {'users': paginate(request, models.\n CustomUser.objects.by_rating())[:10], 'tags': paginate(request,\n models.Tag.objects.hottest())[:10], 'questions': paginate(request,\n objects_list=models.Question.objects.get_hot()), 'page_objects':\n paginate(request, objects_list=models.Question.objects.get_hot())})\n\n\n<mask token>\n\n\ndef question_page(request, id):\n return render(request, 'questions.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[:10], 'question':\n get_object_or_404(models.Question, pk=id), 'answers': paginate(\n request, objects_list=models.Answer.objects.get_hot_for_answer(id)),\n 'page_objects': paginate(request, objects_list=models.Answer.\n objects.get_hot_for_answer(id))})\n\n\ndef tag(request, id):\n return render(request, 'tag_find.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[0:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[0:10], 'tag':\n get_object_or_404(models.Tag, pk=id), 'questions': paginate(request,\n models.Question.objects.get_by_tag(tag_id=id)), 'page_objects':\n paginate(request, objects_list=models.Question.objects.get_by_tag(\n tag_id=id))})\n\n\n<mask token>\n\n\n@login_required(login_url='/log_in/')\ndef ask(request):\n error = True\n if request.method == 'POST':\n firstly = False\n form = AskForm(request.POST)\n if form.is_valid():\n ques = models.Question.objects.create(author=request.user,\n create_date=timezone.now(), is_active=True, title=form.\n cleaned_data['title'], text=form.cleaned_data['text'])\n ques.save()\n for tagTitle in form.cleaned_data['tags'].split():\n tag = models.Tag.objects.get_or_create(title=tagTitle)[0]\n ques.tags.add(tag)\n ques.save()\n return redirect('/question/{}/'.format(ques.id))\n else:\n error = False\n else:\n form = AskForm()\n firstly = True\n return render(request, 'new_ask.html', {'firstly': firstly, 'error':\n error, 'form': form, 'tags': paginate(request, models.Tag.objects.\n hottest())[:10], 'users': paginate(request, models.CustomUser.\n objects.by_rating())[:10]})\n\n\ndef signin(request):\n last_page = request.GET['next']\n if last_page == '/logout' or last_page == '/login':\n last_page = '/'\n error = False\n if request.method == 'POST':\n user = authenticate(username=request.POST['nickname'], password=\n request.POST['password'])\n if user is not None:\n login(request, user)\n return redirect(last_page)\n else:\n error = True\n return render(request, 'login.html', {'error': error, 'last_page':\n last_page, 'tags': paginate(request, models.Tag.objects.hottest()),\n 'users': paginate(request, models.CustomUser.objects.by_rating())})\n\n\n<mask token>\n\n\ndef signout(request):\n if not request.user.is_authenticated:\n raise Http404\n logout(request)\n return redirect('/')\n\n\n<mask token>\n\n\n@require_POST\ndef like_question(request):\n question_id = request.POST.get('question_id', '')\n like_type = request.POST.get('like_type', '')\n question = 
get_object_or_404(Question, pk=question_id)\n if not question:\n return JsonResponse({'status': 'error'})\n if like_type == 'like':\n question.rating += 1\n elif like_type == 'dislike':\n question.rating -= 1\n question.save()\n return JsonResponse({'status': 'ok'})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef top(request):\n return render(request, 'new_questions.html', {'title': 'Топ вопросов',\n 'questions': paginate(request, models.Question.objects.get_hot()),\n 'tags': paginate(request, models.Tag.objects.hottest())[:10],\n 'users': paginate(request, models.CustomUser.objects.by_rating())[:\n 10], 'page_objects': paginate(request, models.Question.objects.\n get_hot())})\n\n\ndef new(request):\n return render(request, 'new_questions.html', {'title': 'Новые',\n 'questions': paginate(request, models.Question.objects.get_new()),\n 'tags': paginate(request, models.Tag.objects.hottest())[:10],\n 'users': paginate(request, models.CustomUser.objects.by_rating())[:\n 10], 'page_objects': paginate(request, models.Question.objects.\n get_new())})\n\n\ndef hot(request, id=1):\n \"\"\"docstring for Main_menu\"\"\"\n return render(request, 'hot.html', {'users': paginate(request, models.\n CustomUser.objects.by_rating())[:10], 'tags': paginate(request,\n models.Tag.objects.hottest())[:10], 'questions': paginate(request,\n objects_list=models.Question.objects.get_hot()), 'page_objects':\n paginate(request, objects_list=models.Question.objects.get_hot())})\n\n\ndef profile(request, id):\n return render(request, 'user_settings.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[:10], 'profile':\n get_object_or_404(models.CustomUser, pk=id)})\n\n\n<mask token>\n\n\ndef question_page(request, id):\n return render(request, 'questions.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[:10], 'question':\n get_object_or_404(models.Question, pk=id), 'answers': paginate(\n request, objects_list=models.Answer.objects.get_hot_for_answer(id)),\n 'page_objects': paginate(request, objects_list=models.Answer.\n objects.get_hot_for_answer(id))})\n\n\ndef tag(request, id):\n return render(request, 'tag_find.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[0:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[0:10], 'tag':\n get_object_or_404(models.Tag, pk=id), 'questions': paginate(request,\n models.Question.objects.get_by_tag(tag_id=id)), 'page_objects':\n paginate(request, objects_list=models.Question.objects.get_by_tag(\n tag_id=id))})\n\n\n<mask token>\n\n\n@login_required(login_url='/log_in/')\ndef new_answer(request, id):\n if models.Question.objects.filter(id=id).exists():\n if request.method == 'POST':\n form = AnswerForm(request.POST)\n if form.is_valid():\n answeredQuestion = get_object_or_404(models.Question, pk=id)\n answer = models.Answer.objects.create(author=request.user,\n create_date=timezone.now(), text=form.cleaned_data[\n 'text'], question_id=answeredQuestion.id)\n answer.save()\n return redirect('/question/{}/add_answer/'.format(id))\n else:\n form = AnswerForm()\n return render(request, 'questions.html', {'form': form, 'question':\n get_object_or_404(models.Question, pk=id), 'answers': paginate(\n request, models.Answer.objects.get_hot_for_answer(id)), 'tags':\n paginate(request, models.Tag.objects.hottest())[:10], 'users':\n paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects': paginate(request, models.Answer.objects.\n get_hot_for_answer(id))})\n else:\n raise Http404\n\n\n@login_required(login_url='/log_in/')\ndef ask(request):\n error = True\n if request.method == 'POST':\n firstly = False\n form = AskForm(request.POST)\n if 
form.is_valid():\n ques = models.Question.objects.create(author=request.user,\n create_date=timezone.now(), is_active=True, title=form.\n cleaned_data['title'], text=form.cleaned_data['text'])\n ques.save()\n for tagTitle in form.cleaned_data['tags'].split():\n tag = models.Tag.objects.get_or_create(title=tagTitle)[0]\n ques.tags.add(tag)\n ques.save()\n return redirect('/question/{}/'.format(ques.id))\n else:\n error = False\n else:\n form = AskForm()\n firstly = True\n return render(request, 'new_ask.html', {'firstly': firstly, 'error':\n error, 'form': form, 'tags': paginate(request, models.Tag.objects.\n hottest())[:10], 'users': paginate(request, models.CustomUser.\n objects.by_rating())[:10]})\n\n\ndef signin(request):\n last_page = request.GET['next']\n if last_page == '/logout' or last_page == '/login':\n last_page = '/'\n error = False\n if request.method == 'POST':\n user = authenticate(username=request.POST['nickname'], password=\n request.POST['password'])\n if user is not None:\n login(request, user)\n return redirect(last_page)\n else:\n error = True\n return render(request, 'login.html', {'error': error, 'last_page':\n last_page, 'tags': paginate(request, models.Tag.objects.hottest()),\n 'users': paginate(request, models.CustomUser.objects.by_rating())})\n\n\ndef registration(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST, request.FILES)\n print(user_form)\n if user_form.is_valid():\n user = user_form.save()\n user.set_password(user.password)\n user.save()\n login(request, user)\n return redirect(request.GET.get('next') if request.GET.get(\n 'next') != '' else '/')\n else:\n print(user_form.errors)\n else:\n user_form = UserRegistrationForm()\n return render(request, 'registration.html', {'form': user_form})\n\n\ndef signout(request):\n if not request.user.is_authenticated:\n raise Http404\n logout(request)\n return redirect('/')\n\n\n<mask token>\n\n\n@require_POST\ndef like_question(request):\n question_id = request.POST.get('question_id', '')\n like_type = request.POST.get('like_type', '')\n question = get_object_or_404(Question, pk=question_id)\n if not question:\n return JsonResponse({'status': 'error'})\n if like_type == 'like':\n question.rating += 1\n elif like_type == 'dislike':\n question.rating -= 1\n question.save()\n return JsonResponse({'status': 'ok'})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef top(request):\n return render(request, 'new_questions.html', {'title': 'Топ вопросов',\n 'questions': paginate(request, models.Question.objects.get_hot()),\n 'tags': paginate(request, models.Tag.objects.hottest())[:10],\n 'users': paginate(request, models.CustomUser.objects.by_rating())[:\n 10], 'page_objects': paginate(request, models.Question.objects.\n get_hot())})\n\n\ndef new(request):\n return render(request, 'new_questions.html', {'title': 'Новые',\n 'questions': paginate(request, models.Question.objects.get_new()),\n 'tags': paginate(request, models.Tag.objects.hottest())[:10],\n 'users': paginate(request, models.CustomUser.objects.by_rating())[:\n 10], 'page_objects': paginate(request, models.Question.objects.\n get_new())})\n\n\ndef hot(request, id=1):\n \"\"\"docstring for Main_menu\"\"\"\n return render(request, 'hot.html', {'users': paginate(request, models.\n CustomUser.objects.by_rating())[:10], 'tags': paginate(request,\n models.Tag.objects.hottest())[:10], 'questions': paginate(request,\n objects_list=models.Question.objects.get_hot()), 'page_objects':\n paginate(request, objects_list=models.Question.objects.get_hot())})\n\n\ndef profile(request, id):\n return render(request, 'user_settings.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[:10], 'profile':\n get_object_or_404(models.CustomUser, pk=id)})\n\n\ndef user_questions(request, id):\n \"\"\"docstring for Main_menu\"\"\"\n return render(request, 'user_question.html', {'questions': paginate(\n request, models.Question.objects.get_by_user(user_id=id)), 'tags':\n paginate(request, models.Tag.objects.hottest())[:10], 'users':\n paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects': paginate(request, models.Question.objects.\n get_by_user(user_id=id))})\n\n\ndef question_page(request, id):\n return render(request, 'questions.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[:10], 'question':\n get_object_or_404(models.Question, pk=id), 'answers': paginate(\n request, objects_list=models.Answer.objects.get_hot_for_answer(id)),\n 'page_objects': paginate(request, objects_list=models.Answer.\n objects.get_hot_for_answer(id))})\n\n\ndef tag(request, id):\n return render(request, 'tag_find.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[0:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[0:10], 'tag':\n get_object_or_404(models.Tag, pk=id), 'questions': paginate(request,\n models.Question.objects.get_by_tag(tag_id=id)), 'page_objects':\n paginate(request, objects_list=models.Question.objects.get_by_tag(\n tag_id=id))})\n\n\n<mask token>\n\n\n@login_required(login_url='/log_in/')\ndef new_answer(request, id):\n if models.Question.objects.filter(id=id).exists():\n if request.method == 'POST':\n form = AnswerForm(request.POST)\n if form.is_valid():\n answeredQuestion = get_object_or_404(models.Question, pk=id)\n answer = models.Answer.objects.create(author=request.user,\n create_date=timezone.now(), text=form.cleaned_data[\n 'text'], question_id=answeredQuestion.id)\n answer.save()\n return redirect('/question/{}/add_answer/'.format(id))\n else:\n form = AnswerForm()\n return render(request, 'questions.html', {'form': form, 'question':\n get_object_or_404(models.Question, pk=id), 'answers': paginate(\n request, 
models.Answer.objects.get_hot_for_answer(id)), 'tags':\n paginate(request, models.Tag.objects.hottest())[:10], 'users':\n paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects': paginate(request, models.Answer.objects.\n get_hot_for_answer(id))})\n else:\n raise Http404\n\n\n@login_required(login_url='/log_in/')\ndef ask(request):\n error = True\n if request.method == 'POST':\n firstly = False\n form = AskForm(request.POST)\n if form.is_valid():\n ques = models.Question.objects.create(author=request.user,\n create_date=timezone.now(), is_active=True, title=form.\n cleaned_data['title'], text=form.cleaned_data['text'])\n ques.save()\n for tagTitle in form.cleaned_data['tags'].split():\n tag = models.Tag.objects.get_or_create(title=tagTitle)[0]\n ques.tags.add(tag)\n ques.save()\n return redirect('/question/{}/'.format(ques.id))\n else:\n error = False\n else:\n form = AskForm()\n firstly = True\n return render(request, 'new_ask.html', {'firstly': firstly, 'error':\n error, 'form': form, 'tags': paginate(request, models.Tag.objects.\n hottest())[:10], 'users': paginate(request, models.CustomUser.\n objects.by_rating())[:10]})\n\n\ndef signin(request):\n last_page = request.GET['next']\n if last_page == '/logout' or last_page == '/login':\n last_page = '/'\n error = False\n if request.method == 'POST':\n user = authenticate(username=request.POST['nickname'], password=\n request.POST['password'])\n if user is not None:\n login(request, user)\n return redirect(last_page)\n else:\n error = True\n return render(request, 'login.html', {'error': error, 'last_page':\n last_page, 'tags': paginate(request, models.Tag.objects.hottest()),\n 'users': paginate(request, models.CustomUser.objects.by_rating())})\n\n\ndef registration(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST, request.FILES)\n print(user_form)\n if user_form.is_valid():\n user = user_form.save()\n user.set_password(user.password)\n user.save()\n login(request, user)\n return redirect(request.GET.get('next') if request.GET.get(\n 'next') != '' else '/')\n else:\n print(user_form.errors)\n else:\n user_form = UserRegistrationForm()\n return render(request, 'registration.html', {'form': user_form})\n\n\ndef signout(request):\n if not request.user.is_authenticated:\n raise Http404\n logout(request)\n return redirect('/')\n\n\n<mask token>\n\n\n@require_POST\ndef like_question(request):\n question_id = request.POST.get('question_id', '')\n like_type = request.POST.get('like_type', '')\n question = get_object_or_404(Question, pk=question_id)\n if not question:\n return JsonResponse({'status': 'error'})\n if like_type == 'like':\n question.rating += 1\n elif like_type == 'dislike':\n question.rating -= 1\n question.save()\n return JsonResponse({'status': 'ok'})\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef index(request):\n return render(request, 'new_questions.html', {'title': 'Вопросы',\n 'questions': paginate(request, models.Question.objects.all()),\n 'tags': paginate(request, models.Tag.objects.hottest())[:10],\n 'users': paginate(request, models.CustomUser.objects.by_rating())[:\n 10], 'page_objects': paginate(request, models.Question.objects.all())})\n\n\ndef top(request):\n return render(request, 'new_questions.html', {'title': 'Топ вопросов',\n 'questions': paginate(request, models.Question.objects.get_hot()),\n 'tags': paginate(request, models.Tag.objects.hottest())[:10],\n 'users': paginate(request, models.CustomUser.objects.by_rating())[:\n 10], 'page_objects': paginate(request, models.Question.objects.\n get_hot())})\n\n\ndef new(request):\n return render(request, 'new_questions.html', {'title': 'Новые',\n 'questions': paginate(request, models.Question.objects.get_new()),\n 'tags': paginate(request, models.Tag.objects.hottest())[:10],\n 'users': paginate(request, models.CustomUser.objects.by_rating())[:\n 10], 'page_objects': paginate(request, models.Question.objects.\n get_new())})\n\n\ndef hot(request, id=1):\n \"\"\"docstring for Main_menu\"\"\"\n return render(request, 'hot.html', {'users': paginate(request, models.\n CustomUser.objects.by_rating())[:10], 'tags': paginate(request,\n models.Tag.objects.hottest())[:10], 'questions': paginate(request,\n objects_list=models.Question.objects.get_hot()), 'page_objects':\n paginate(request, objects_list=models.Question.objects.get_hot())})\n\n\ndef profile(request, id):\n return render(request, 'user_settings.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[:10], 'profile':\n get_object_or_404(models.CustomUser, pk=id)})\n\n\ndef user_questions(request, id):\n \"\"\"docstring for Main_menu\"\"\"\n return render(request, 'user_question.html', {'questions': paginate(\n request, models.Question.objects.get_by_user(user_id=id)), 'tags':\n paginate(request, models.Tag.objects.hottest())[:10], 'users':\n paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects': paginate(request, models.Question.objects.\n get_by_user(user_id=id))})\n\n\ndef question_page(request, id):\n return render(request, 'questions.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[:10], 'question':\n get_object_or_404(models.Question, pk=id), 'answers': paginate(\n request, objects_list=models.Answer.objects.get_hot_for_answer(id)),\n 'page_objects': paginate(request, objects_list=models.Answer.\n objects.get_hot_for_answer(id))})\n\n\ndef tag(request, id):\n return render(request, 'tag_find.html', {'users': paginate(request,\n models.CustomUser.objects.by_rating())[0:10], 'tags': paginate(\n request, models.Tag.objects.hottest())[0:10], 'tag':\n get_object_or_404(models.Tag, pk=id), 'questions': paginate(request,\n models.Question.objects.get_by_tag(tag_id=id)), 'page_objects':\n paginate(request, objects_list=models.Question.objects.get_by_tag(\n tag_id=id))})\n\n\ndef edit(request):\n user = get_object_or_404(models.CustomUser, username=request.user)\n if request.method == 'POST':\n form = UserSettingsForm(instance=user, data=request.POST, files=\n request.FILES)\n if form.is_valid():\n form.save()\n return profile(request, user.id)\n else:\n form = UserSettingsForm(instance=user)\n return render(request, 'edit.html', {'form': 
form, 'tags': paginate(\n request, models.Tag.objects.hottest())[:10], 'users': paginate(\n request, models.CustomUser.objects.by_rating())[:10]})\n\n\n@login_required(login_url='/log_in/')\ndef new_answer(request, id):\n if models.Question.objects.filter(id=id).exists():\n if request.method == 'POST':\n form = AnswerForm(request.POST)\n if form.is_valid():\n answeredQuestion = get_object_or_404(models.Question, pk=id)\n answer = models.Answer.objects.create(author=request.user,\n create_date=timezone.now(), text=form.cleaned_data[\n 'text'], question_id=answeredQuestion.id)\n answer.save()\n return redirect('/question/{}/add_answer/'.format(id))\n else:\n form = AnswerForm()\n return render(request, 'questions.html', {'form': form, 'question':\n get_object_or_404(models.Question, pk=id), 'answers': paginate(\n request, models.Answer.objects.get_hot_for_answer(id)), 'tags':\n paginate(request, models.Tag.objects.hottest())[:10], 'users':\n paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects': paginate(request, models.Answer.objects.\n get_hot_for_answer(id))})\n else:\n raise Http404\n\n\n@login_required(login_url='/log_in/')\ndef ask(request):\n error = True\n if request.method == 'POST':\n firstly = False\n form = AskForm(request.POST)\n if form.is_valid():\n ques = models.Question.objects.create(author=request.user,\n create_date=timezone.now(), is_active=True, title=form.\n cleaned_data['title'], text=form.cleaned_data['text'])\n ques.save()\n for tagTitle in form.cleaned_data['tags'].split():\n tag = models.Tag.objects.get_or_create(title=tagTitle)[0]\n ques.tags.add(tag)\n ques.save()\n return redirect('/question/{}/'.format(ques.id))\n else:\n error = False\n else:\n form = AskForm()\n firstly = True\n return render(request, 'new_ask.html', {'firstly': firstly, 'error':\n error, 'form': form, 'tags': paginate(request, models.Tag.objects.\n hottest())[:10], 'users': paginate(request, models.CustomUser.\n objects.by_rating())[:10]})\n\n\ndef signin(request):\n last_page = request.GET['next']\n if last_page == '/logout' or last_page == '/login':\n last_page = '/'\n error = False\n if request.method == 'POST':\n user = authenticate(username=request.POST['nickname'], password=\n request.POST['password'])\n if user is not None:\n login(request, user)\n return redirect(last_page)\n else:\n error = True\n return render(request, 'login.html', {'error': error, 'last_page':\n last_page, 'tags': paginate(request, models.Tag.objects.hottest()),\n 'users': paginate(request, models.CustomUser.objects.by_rating())})\n\n\ndef registration(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST, request.FILES)\n print(user_form)\n if user_form.is_valid():\n user = user_form.save()\n user.set_password(user.password)\n user.save()\n login(request, user)\n return redirect(request.GET.get('next') if request.GET.get(\n 'next') != '' else '/')\n else:\n print(user_form.errors)\n else:\n user_form = UserRegistrationForm()\n return render(request, 'registration.html', {'form': user_form})\n\n\ndef signout(request):\n if not request.user.is_authenticated:\n raise Http404\n logout(request)\n return redirect('/')\n\n\ndef paginate(request, objects_list):\n paginator = Paginator(objects_list, 30)\n page = request.GET.get('page')\n try:\n objects = paginator.page(page)\n except PageNotAnInteger:\n objects = paginator.page(1)\n except EmptyPage:\n objects = paginator.page(paginator.num_pages)\n return objects\n\n\n@require_POST\ndef 
like_question(request):\n question_id = request.POST.get('question_id', '')\n like_type = request.POST.get('like_type', '')\n question = get_object_or_404(Question, pk=question_id)\n if not question:\n return JsonResponse({'status': 'error'})\n if like_type == 'like':\n question.rating += 1\n elif like_type == 'dislike':\n question.rating -= 1\n question.save()\n return JsonResponse({'status': 'ok'})\n\n\n@require_POST\ndef like_answer(request):\n answer_id = request.POST.get('answer_id', '')\n like_type = request.POST.get('like_type', '')\n answer = get_object_or_404(Answer, pk=answer_id)\n if not answer:\n return JsonResponse({'status': 'error'})\n if like_type == 'like':\n answer.rating += 1\n elif like_type == 'dislike':\n answer.rating -= 1\n answer.save()\n return JsonResponse({'status': 'ok'})\n\n\n@require_POST\ndef approve_answer(request):\n answer_id = request.POST.get('answer_id', '')\n answer = get_object_or_404(Answer, pk=answer_id)\n if not answer:\n return JsonResponse({'status': 'error'})\n answer.approved = not answer.approved\n answer.save()\n return JsonResponse({'status': 'ok'})\n",
"step-5": "# from django.shortcuts import render\n# from django.http import HttpResponse\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.views import generic\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nfrom . import models\nfrom django.utils import timezone\nfrom questions.forms import UserRegistrationForm, UserLoginForm, UserSettingsForm, AskForm, AnswerForm, UserForm\n\n# from .models import Post \n\n# Create your views here.\n\t\t\ndef index(request):\n return render(request, 'new_questions.html', {\n 'title': 'Вопросы',\n 'questions': paginate(request, models.Question.objects.all()),\n 'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n 'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects' : paginate(request, models.Question.objects.all()),\n })\n\ndef top(request):\n return render(request, 'new_questions.html', {\n 'title': 'Топ вопросов',\n 'questions': paginate(request, models.Question.objects.get_hot()),\n 'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n 'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects' : paginate(request, models.Question.objects.get_hot()),\n })\n\ndef new(request):\n return render(request, 'new_questions.html', {\n 'title': 'Новые',\n 'questions': paginate(request, models.Question.objects.get_new()),\n 'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n 'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects' : paginate(request, models.Question.objects.get_new()),\n })\n\n\ndef hot(request, id=1):\n\t\"\"\"docstring for Main_menu\"\"\"\n\treturn render(request, \"hot.html\", {\n\t\t'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n\t\t'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n\t\t\"questions\" : paginate(request, objects_list = models.Question.objects.get_hot()),\n\t\t\"page_objects\" : paginate(request, objects_list = models.Question.objects.get_hot()),\n\t\t})\ndef profile(request, id):\n\treturn render(request, \"user_settings.html\", {\n\t\t'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n\t\t'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n\t\t\"profile\": get_object_or_404(models.CustomUser, pk=id),\n\t\t})\n\ndef user_questions(request, id):\t#Переделай вид страницы! 
не красиво!\n\t\"\"\"docstring for Main_menu\"\"\"\n\treturn render(request, \"user_question.html\", {\n\t\t'questions': paginate(request, models.Question.objects.get_by_user(user_id=id)),\n 'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n 'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects' : paginate(request, models.Question.objects.get_by_user(user_id=id)),\n\t\t})\n\ndef question_page(request, id):\n\treturn render(request, \"questions.html\", {\n\t\t'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n\t\t'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n\t\t\"question\": get_object_or_404(models.Question, pk=id) ,\n\t\t\"answers\": paginate(request, objects_list = models.Answer.objects.get_hot_for_answer(id)),\n\t\t\"page_objects\": paginate(request, objects_list = models.Answer.objects.get_hot_for_answer(id)),\n\t\t})\n\ndef tag(request, id):\n return render(request, 'tag_find.html', {\n 'users' : paginate(request, models.CustomUser.objects.by_rating())[0:10],\n 'tags' : paginate(request, models.Tag.objects.hottest())[0:10],\n 'tag' : get_object_or_404(models.Tag, pk=id) ,\n 'questions': paginate(request, models.Question.objects.get_by_tag(tag_id=id)),\n \"page_objects\": paginate(request, objects_list = models.Question.objects.get_by_tag(tag_id=id)),\n })\n\n\ndef edit(request):\n user = get_object_or_404(models.CustomUser, username=request.user)\n\n if request.method == 'POST':\n form = UserSettingsForm(instance=user,\n data=request.POST,\n files=request.FILES\n )\n if form.is_valid():\n form.save()\n return profile(request, user.id)\n else:\n form = UserSettingsForm(instance=user)\n\n return render(request, 'edit.html', {\n 'form': form,\n 'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n 'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n })\n\n@login_required(login_url='/log_in/')\ndef new_answer(request, id):\n if models.Question.objects.filter(id=id).exists():\n if request.method == 'POST':\n form = AnswerForm(request.POST)\n if form.is_valid():\n #answeredQuestion = Question.objects.get_by_id(id)[0]\n answeredQuestion = get_object_or_404(models.Question, pk=id)\n answer = models.Answer.objects.create(author=request.user,\n create_date=timezone.now(),\n text=form.cleaned_data['text'],\n question_id=answeredQuestion.id)\n answer.save()\n return redirect('/question/{}/add_answer/'.format(id))\n else:\n form = AnswerForm()\n #return render(request, 'question/new_answer.html', {'form': form})\n return render(request, 'questions.html', {\n 'form': form,\n 'question': get_object_or_404(models.Question, pk=id),\n 'answers' : paginate(request, models.Answer.objects.get_hot_for_answer(id)),\n 'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n 'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n 'page_objects' : paginate(request, models.Answer.objects.get_hot_for_answer(id)),\n })\n else:\n raise Http404\n\n@login_required(login_url='/log_in/')\ndef ask(request):\n error = True\n if request.method == 'POST':\n firstly = False\n form = AskForm(request.POST)\n if form.is_valid():\n ques = models.Question.objects.create(author=request.user,\n create_date=timezone.now(),\n is_active=True,\n title=form.cleaned_data['title'],\n text=form.cleaned_data['text'])\n ques.save()\n\n for tagTitle in form.cleaned_data['tags'].split():\n tag = models.Tag.objects.get_or_create(title=tagTitle)[0]\n ques.tags.add(tag)\n ques.save()\n #return 
question(request, ques.id)\n return redirect('/question/{}/'.format(ques.id))\n else:\n error = False\n else:\n form = AskForm()\n firstly = True\n return render(request, 'new_ask.html', {\n 'firstly': firstly,\n 'error': error,\n 'form': form,\n 'tags' : paginate(request, models.Tag.objects.hottest())[:10],\n 'users' : paginate(request, models.CustomUser.objects.by_rating())[:10],\n })\n\ndef signin(request):\n last_page = request.GET['next']\n if last_page == '/logout' or last_page == '/login':\n last_page = '/'\n error = False\n if request.method == 'POST':\n user = authenticate(username=request.POST['nickname'], password=request.POST['password'])\n if user is not None:\n login(request, user) # Авторизуем пользователя\n return redirect(last_page)\n else:\n error = True\n return render(request, 'login.html',\n {'error': error,\n 'last_page': last_page,\n 'tags' : paginate(request, models.Tag.objects.hottest()),\n 'users' : paginate(request, models.CustomUser.objects.by_rating()),\n })\n\ndef registration(request):\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST, request.FILES)\n print(user_form)\n if user_form.is_valid():\n user = user_form.save()\n user.set_password(user.password)\n user.save()\n login(request, user)\n return redirect(request.GET.get('next') if request.GET.get('next') != '' else '/')\n else:\n print(user_form.errors)\n else:\n user_form = UserRegistrationForm()\n return render(request,'registration.html',\n {'form':user_form,})\n\ndef signout(request):\n if not request.user.is_authenticated:\n raise Http404\n logout(request)\n #return redirect(request.GET['from'])\n return redirect('/')\n\n\ndef paginate(request, objects_list):\n paginator = Paginator(objects_list, 30)\n page = request.GET.get('page')\n try:\n objects = paginator.page(page)\n except PageNotAnInteger:\n objects = paginator.page(1)\n except EmptyPage:\n objects = paginator.page(paginator.num_pages)\n\n return objects\n\n@require_POST\ndef like_question(request):\n question_id = request.POST.get('question_id', '')\n like_type = request.POST.get('like_type', '')\n question =get_object_or_404(Question, pk=question_id)\n if not question:\n return JsonResponse({\"status\": \"error\"})\n\n if (like_type == 'like'):\n question.rating += 1\n elif (like_type == 'dislike'):\n question.rating -= 1\n question.save()\n\n return JsonResponse({\"status\": \"ok\"})\n\n@require_POST\ndef like_answer(request):\n answer_id = request.POST.get('answer_id', '')\n like_type = request.POST.get('like_type', '')\n answer =get_object_or_404(Answer, pk=answer_id)\n if not answer:\n return JsonResponse({\"status\": \"error\"})\n\n if (like_type == 'like'):\n answer.rating += 1\n elif (like_type == 'dislike'):\n answer.rating -= 1\n answer.save()\n\n return JsonResponse({\"status\": \"ok\"})\n\n\n@require_POST\ndef approve_answer(request):\n answer_id = request.POST.get('answer_id', '')\n answer =get_object_or_404(Answer, pk=answer_id)\n if not answer:\n return JsonResponse({\"status\": \"error\"})\n\n answer.approved = not answer.approved\n answer.save()\n\n return JsonResponse({\"status\": \"ok\"})",
"step-ids": [
8,
12,
13,
18,
20
]
}
|
[
8,
12,
13,
18,
20
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('mykey.key', 'rb') as mykey:
key = mykey.read()
<|reserved_special_token_0|>
with open('encryptedpassword.txt', 'rb') as encrypted_password_file:
encrypte_file = encrypted_password_file.read()
<|reserved_special_token_0|>
with open('decryptedpassword.txt', 'wb') as decrypted_password_file:
decrypted_file = decrypted_password_file.write(decrypt)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('mykey.key', 'rb') as mykey:
key = mykey.read()
f = Fernet(key)
with open('encryptedpassword.txt', 'rb') as encrypted_password_file:
encrypte_file = encrypted_password_file.read()
decrypt = f.decrypt(encrypte_file)
with open('decryptedpassword.txt', 'wb') as decrypted_password_file:
decrypted_file = decrypted_password_file.write(decrypt)
<|reserved_special_token_1|>
from os import read
from cryptography.fernet import Fernet
with open('mykey.key', 'rb') as mykey:
key = mykey.read()
f = Fernet(key)
with open('encryptedpassword.txt', 'rb') as encrypted_password_file:
encrypte_file = encrypted_password_file.read()
decrypt = f.decrypt(encrypte_file)
with open('decryptedpassword.txt', 'wb') as decrypted_password_file:
decrypted_file = decrypted_password_file.write(decrypt)
<|reserved_special_token_1|>
from os import read
from cryptography.fernet import Fernet
# Create a key
# key = Fernet.generate_key()
# Whenever we run this code we would create a new key:
# with open('mykey.key','wb') as mykey:
#     mykey.write(key)
# To avoid creating a new key each run, reuse the same saved key instead
with open('mykey.key','rb') as mykey:
key = mykey.read()
#print(key)
# f = Fernet(key)
# with open('Mailing Client/password.txt','rb') as original_file:
# original = original_file.read()
# #encrypt the data
# encrypted = f.encrypt(original)
# with open('encryptedpassword.txt','wb') as encrypted_password_file:
# encrypted_file = encrypted_password_file.write(encrypted)
#Decrypt Part
f = Fernet(key)
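# Fernet is symmetric, so decryption must use the same key that encrypted the file.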
with open('encryptedpassword.txt','rb') as encrypted_password_file:
encrypte_file = encrypted_password_file.read()
decrypt = f.decrypt(encrypte_file)
with open('decryptedpassword.txt','wb') as decrypted_password_file:
decrypted_file = decrypted_password_file.write(decrypt)
|
flexible
|
{
"blob_id": "df828344b81a40b7101adcc6759780ea84f2c6b4",
"index": 4698,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('mykey.key', 'rb') as mykey:\n key = mykey.read()\n<mask token>\nwith open('encryptedpassword.txt', 'rb') as encrypted_password_file:\n encrypte_file = encrypted_password_file.read()\n<mask token>\nwith open('decryptedpassword.txt', 'wb') as decrypted_password_file:\n decrypted_file = decrypted_password_file.write(decrypt)\n",
"step-3": "<mask token>\nwith open('mykey.key', 'rb') as mykey:\n key = mykey.read()\nf = Fernet(key)\nwith open('encryptedpassword.txt', 'rb') as encrypted_password_file:\n encrypte_file = encrypted_password_file.read()\ndecrypt = f.decrypt(encrypte_file)\nwith open('decryptedpassword.txt', 'wb') as decrypted_password_file:\n decrypted_file = decrypted_password_file.write(decrypt)\n",
"step-4": "from os import read\nfrom cryptography.fernet import Fernet\nwith open('mykey.key', 'rb') as mykey:\n key = mykey.read()\nf = Fernet(key)\nwith open('encryptedpassword.txt', 'rb') as encrypted_password_file:\n encrypte_file = encrypted_password_file.read()\ndecrypt = f.decrypt(encrypte_file)\nwith open('decryptedpassword.txt', 'wb') as decrypted_password_file:\n decrypted_file = decrypted_password_file.write(decrypt)\n",
"step-5": "from os import read\r\nfrom cryptography.fernet import Fernet\r\n #create a key\r\n# key = Fernet.generate_key()\r\n\r\n#When every we run this code we will create a new key \r\n# with open('mykey.key','wb') as mykey:\r\n# mykey.write(key)\r\n\r\n#To avoid create a new key and reuse the same key\r\n\r\nwith open('mykey.key','rb') as mykey:\r\n key = mykey.read()\r\n\r\n#print(key)\r\n\r\n# f = Fernet(key)\r\n\r\n# with open('Mailing Client/password.txt','rb') as original_file:\r\n# original = original_file.read()\r\n\r\n# #encrypt the data\r\n\r\n# encrypted = f.encrypt(original)\r\n\r\n# with open('encryptedpassword.txt','wb') as encrypted_password_file:\r\n# encrypted_file = encrypted_password_file.write(encrypted)\r\n\r\n#Decrypt Part\r\n\r\nf = Fernet(key)\r\n\r\nwith open('encryptedpassword.txt','rb') as encrypted_password_file:\r\n encrypte_file = encrypted_password_file.read()\r\n\r\ndecrypt = f.decrypt(encrypte_file)\r\n\r\nwith open('decryptedpassword.txt','wb') as decrypted_password_file:\r\n decrypted_file = decrypted_password_file.write(decrypt)\r\n\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
<|reserved_special_token_0|>
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
<|reserved_special_token_0|>
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
@pytest.mark.parametrize(('dtypes', 'ans'), [((np.uint8, np.int16), np.
int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.
uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.
float32, np.float64), np.float64)])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(700, 1)])
def test_distribute_jobs_sequential_onejob(nrow, njob):
with pytest.raises(ValueError):
utils.distribute_jobs(nrow, nrow, njob, interlaced=False)
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_succcess_exists(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
@pytest.mark.parametrize(('dtypes', 'ans'), [((np.uint8, np.int16), np.
int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.
uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.
float32, np.float64), np.float64)])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_interlaced(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=True))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(700, 1)])
def test_distribute_jobs_sequential_onejob(nrow, njob):
with pytest.raises(ValueError):
utils.distribute_jobs(nrow, nrow, njob, interlaced=False)
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_succcess_exists(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
@pytest.mark.parametrize(('dtypes', 'ans'), [((np.uint8, np.int16), np.
int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.
uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.
float32, np.float64), np.float64)])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import pytest
from yatsm import utils
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_interlaced(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=True))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(700, 1)])
def test_distribute_jobs_sequential_onejob(nrow, njob):
with pytest.raises(ValueError):
utils.distribute_jobs(nrow, nrow, njob, interlaced=False)
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_succcess_exists(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
@pytest.mark.parametrize(('dtypes', 'ans'), [((np.uint8, np.int16), np.
int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.
uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.
float32, np.float64), np.float64)])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
<|reserved_special_token_1|>
""" Tests for `yatsm.utils`
"""
import numpy as np
import pytest
from yatsm import utils
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_interlaced(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=True))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(793, 13), (700, 1), (700, 700)])
def test_distribute_jobs_sequential(nrow, njob):
assigned = []
for i in range(njob):
assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))
assigned = np.sort(np.asarray(assigned))
all_rows = np.arange(0, nrow)
np.testing.assert_equal(assigned, all_rows)
@pytest.mark.parametrize('nrow,njob', [(700, 1)])
def test_distribute_jobs_sequential_onejob(nrow, njob):
with pytest.raises(ValueError):
utils.distribute_jobs(nrow, nrow, njob, interlaced=False)
# mkdir_p
def test_mkdir_p_success(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_succcess_exists(tmpdir):
utils.mkdir_p(tmpdir.join('test').strpath)
utils.mkdir_p(tmpdir.join('test').strpath)
def test_mkdir_p_failure_permission(tmpdir):
with pytest.raises(OSError):
utils.mkdir_p('/asdf')
# np_promote_all_types
@pytest.mark.parametrize(('dtypes', 'ans'), [
((np.uint8, np.int16), np.int16),
((np.uint8, np.uint16, np.int16), np.int32),
((np.uint8, np.uint16, np.int16, np.float), np.float),
((np.uint8, np.float16, np.float32, np.float64), np.float64),
])
def test_np_promote_all_types(dtypes, ans):
test_ans = utils.np_promote_all_types(*dtypes)
assert test_ans == ans
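The test module above pins down the behaviour of yatsm.utils without showing the utilities themselves. As a rough sketch of a distribute_jobs implementation that would satisfy these tests (an assumption for illustration, not the project's actual code): interlaced assignment gives job i every n_jobs-th row, sequential assignment gives it a contiguous chunk, and a job that would receive no rows raises ValueError.

import numpy as np

def distribute_jobs(i, n_jobs, nrow, interlaced=True):
    """Return the row indices assigned to job ``i`` out of ``n_jobs`` (hypothetical sketch)."""
    if interlaced:
        # Job i works on rows i, i + n_jobs, i + 2 * n_jobs, ...
        rows = np.arange(i, nrow, n_jobs)
    else:
        # Job i works on a contiguous block of roughly nrow / n_jobs rows
        chunk = int(np.ceil(float(nrow) / n_jobs))
        rows = np.arange(i * chunk, min((i + 1) * chunk, nrow))
    if rows.size == 0:
        raise ValueError('Job {} of {} received no rows to process'.format(i, n_jobs))
    return rows

Either scheme partitions rows 0..nrow-1 across the jobs without gaps or overlap, which is exactly what the two parametrized tests check by sorting the union of all assignments and comparing it against np.arange(0, nrow).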
|
flexible
|
{
"blob_id": "a513dfd84b5d9267b7e96fedc88e5b6dabeea19e",
"index": 640,
"step-1": "<mask token>\n\n\[email protected]('nrow,njob', [(793, 13), (700, 1), (700, 700)])\ndef test_distribute_jobs_sequential(nrow, njob):\n assigned = []\n for i in range(njob):\n assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))\n assigned = np.sort(np.asarray(assigned))\n all_rows = np.arange(0, nrow)\n np.testing.assert_equal(assigned, all_rows)\n\n\n<mask token>\n\n\ndef test_mkdir_p_success(tmpdir):\n utils.mkdir_p(tmpdir.join('test').strpath)\n\n\n<mask token>\n\n\ndef test_mkdir_p_failure_permission(tmpdir):\n with pytest.raises(OSError):\n utils.mkdir_p('/asdf')\n\n\[email protected](('dtypes', 'ans'), [((np.uint8, np.int16), np.\n int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.\n uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.\n float32, np.float64), np.float64)])\ndef test_np_promote_all_types(dtypes, ans):\n test_ans = utils.np_promote_all_types(*dtypes)\n assert test_ans == ans\n",
"step-2": "<mask token>\n\n\[email protected]('nrow,njob', [(793, 13), (700, 1), (700, 700)])\ndef test_distribute_jobs_sequential(nrow, njob):\n assigned = []\n for i in range(njob):\n assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))\n assigned = np.sort(np.asarray(assigned))\n all_rows = np.arange(0, nrow)\n np.testing.assert_equal(assigned, all_rows)\n\n\[email protected]('nrow,njob', [(700, 1)])\ndef test_distribute_jobs_sequential_onejob(nrow, njob):\n with pytest.raises(ValueError):\n utils.distribute_jobs(nrow, nrow, njob, interlaced=False)\n\n\ndef test_mkdir_p_success(tmpdir):\n utils.mkdir_p(tmpdir.join('test').strpath)\n\n\ndef test_mkdir_p_succcess_exists(tmpdir):\n utils.mkdir_p(tmpdir.join('test').strpath)\n utils.mkdir_p(tmpdir.join('test').strpath)\n\n\ndef test_mkdir_p_failure_permission(tmpdir):\n with pytest.raises(OSError):\n utils.mkdir_p('/asdf')\n\n\[email protected](('dtypes', 'ans'), [((np.uint8, np.int16), np.\n int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.\n uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.\n float32, np.float64), np.float64)])\ndef test_np_promote_all_types(dtypes, ans):\n test_ans = utils.np_promote_all_types(*dtypes)\n assert test_ans == ans\n",
"step-3": "<mask token>\n\n\[email protected]('nrow,njob', [(793, 13), (700, 1), (700, 700)])\ndef test_distribute_jobs_interlaced(nrow, njob):\n assigned = []\n for i in range(njob):\n assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=True))\n assigned = np.sort(np.asarray(assigned))\n all_rows = np.arange(0, nrow)\n np.testing.assert_equal(assigned, all_rows)\n\n\[email protected]('nrow,njob', [(793, 13), (700, 1), (700, 700)])\ndef test_distribute_jobs_sequential(nrow, njob):\n assigned = []\n for i in range(njob):\n assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))\n assigned = np.sort(np.asarray(assigned))\n all_rows = np.arange(0, nrow)\n np.testing.assert_equal(assigned, all_rows)\n\n\[email protected]('nrow,njob', [(700, 1)])\ndef test_distribute_jobs_sequential_onejob(nrow, njob):\n with pytest.raises(ValueError):\n utils.distribute_jobs(nrow, nrow, njob, interlaced=False)\n\n\ndef test_mkdir_p_success(tmpdir):\n utils.mkdir_p(tmpdir.join('test').strpath)\n\n\ndef test_mkdir_p_succcess_exists(tmpdir):\n utils.mkdir_p(tmpdir.join('test').strpath)\n utils.mkdir_p(tmpdir.join('test').strpath)\n\n\ndef test_mkdir_p_failure_permission(tmpdir):\n with pytest.raises(OSError):\n utils.mkdir_p('/asdf')\n\n\[email protected](('dtypes', 'ans'), [((np.uint8, np.int16), np.\n int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.\n uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.\n float32, np.float64), np.float64)])\ndef test_np_promote_all_types(dtypes, ans):\n test_ans = utils.np_promote_all_types(*dtypes)\n assert test_ans == ans\n",
"step-4": "<mask token>\nimport numpy as np\nimport pytest\nfrom yatsm import utils\n\n\[email protected]('nrow,njob', [(793, 13), (700, 1), (700, 700)])\ndef test_distribute_jobs_interlaced(nrow, njob):\n assigned = []\n for i in range(njob):\n assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=True))\n assigned = np.sort(np.asarray(assigned))\n all_rows = np.arange(0, nrow)\n np.testing.assert_equal(assigned, all_rows)\n\n\[email protected]('nrow,njob', [(793, 13), (700, 1), (700, 700)])\ndef test_distribute_jobs_sequential(nrow, njob):\n assigned = []\n for i in range(njob):\n assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))\n assigned = np.sort(np.asarray(assigned))\n all_rows = np.arange(0, nrow)\n np.testing.assert_equal(assigned, all_rows)\n\n\[email protected]('nrow,njob', [(700, 1)])\ndef test_distribute_jobs_sequential_onejob(nrow, njob):\n with pytest.raises(ValueError):\n utils.distribute_jobs(nrow, nrow, njob, interlaced=False)\n\n\ndef test_mkdir_p_success(tmpdir):\n utils.mkdir_p(tmpdir.join('test').strpath)\n\n\ndef test_mkdir_p_succcess_exists(tmpdir):\n utils.mkdir_p(tmpdir.join('test').strpath)\n utils.mkdir_p(tmpdir.join('test').strpath)\n\n\ndef test_mkdir_p_failure_permission(tmpdir):\n with pytest.raises(OSError):\n utils.mkdir_p('/asdf')\n\n\[email protected](('dtypes', 'ans'), [((np.uint8, np.int16), np.\n int16), ((np.uint8, np.uint16, np.int16), np.int32), ((np.uint8, np.\n uint16, np.int16, np.float), np.float), ((np.uint8, np.float16, np.\n float32, np.float64), np.float64)])\ndef test_np_promote_all_types(dtypes, ans):\n test_ans = utils.np_promote_all_types(*dtypes)\n assert test_ans == ans\n",
"step-5": "\"\"\" Tests for `yatsm.utils`\n\"\"\"\nimport numpy as np\nimport pytest\n\nfrom yatsm import utils\n\n\[email protected]('nrow,njob', [(793, 13), (700, 1), (700, 700)])\ndef test_distribute_jobs_interlaced(nrow, njob):\n assigned = []\n for i in range(njob):\n assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=True))\n\n assigned = np.sort(np.asarray(assigned))\n all_rows = np.arange(0, nrow)\n np.testing.assert_equal(assigned, all_rows)\n\n\[email protected]('nrow,njob', [(793, 13), (700, 1), (700, 700)])\ndef test_distribute_jobs_sequential(nrow, njob):\n assigned = []\n for i in range(njob):\n assigned.extend(utils.distribute_jobs(i, njob, nrow, interlaced=False))\n\n assigned = np.sort(np.asarray(assigned))\n all_rows = np.arange(0, nrow)\n np.testing.assert_equal(assigned, all_rows)\n\n\[email protected]('nrow,njob', [(700, 1)])\ndef test_distribute_jobs_sequential_onejob(nrow, njob):\n with pytest.raises(ValueError):\n utils.distribute_jobs(nrow, nrow, njob, interlaced=False)\n\n\n# mkdir_p\ndef test_mkdir_p_success(tmpdir):\n utils.mkdir_p(tmpdir.join('test').strpath)\n\n\ndef test_mkdir_p_succcess_exists(tmpdir):\n utils.mkdir_p(tmpdir.join('test').strpath)\n utils.mkdir_p(tmpdir.join('test').strpath)\n\n\ndef test_mkdir_p_failure_permission(tmpdir):\n with pytest.raises(OSError):\n utils.mkdir_p('/asdf')\n\n\n# np_promote_all_types\[email protected](('dtypes', 'ans'), [\n ((np.uint8, np.int16), np.int16),\n ((np.uint8, np.uint16, np.int16), np.int32),\n ((np.uint8, np.uint16, np.int16, np.float), np.float),\n ((np.uint8, np.float16, np.float32, np.float64), np.float64),\n])\ndef test_np_promote_all_types(dtypes, ans):\n test_ans = utils.np_promote_all_types(*dtypes)\n assert test_ans == ans\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import numpy as np
import scipy.io as sio
import os
import torch
from torchvision.utils import save_image
from tools import *
def test(config, base, loaders, brief):
    compute_and_save_features(base, loaders)
    results = evaluate(config, base, brief)
    return results

def evaluate(config, base, brief=False):
    results = {}
    for mode in config.modes:
        print(mode)
        for number_shot in config.number_shots:
            print(number_shot)
            cmc, mAP = evaluate_sysymm01(base.save_features_path, mode, number_shot)
            results['{},{}'.format(mode, number_shot)] = [cmc, mAP]
            if brief: break
        if brief: break
    return results
def compute_and_save_features(base, loaders):
def compute_features(images):
images_f = fliplr(images)
images = images.to(base.device)
images_f = images_f.to(base.device)
features = base.encoder(base.process_images_4_encoder(images, True, True))
features_f = base.encoder(base.process_images_4_encoder(images_f, True, True))
features, _, _, _ = base.embeder(features)
features_f, _, _, _ = base.embeder(features_f)
features = features + features_f
if base.part_num == 1:
features = torch.unsqueeze(features, -1)
return features
def normalize_and_resize_feature(features):
        # L2-normalize each part feature
norm = torch.norm(features, dim=1, keepdim=True)
features = features / norm.repeat([1, features.size(1), 1])
# resize
features = features.view(features.size(0), -1)
return features
class XX:
def __init__(self):
self.val = {}
def update(self, key, value):
if key not in self.val.keys():
self.val[key] = value
else:
self.val[key] = np.concatenate([self.val[key], value], axis=0)
def get_val(self, key):
if key in self.val.keys():
return self.val[key]
else:
return np.array([[]])
print('Time:{}. Start to compute features'.format(time_now()))
# compute features
# base._resume_model(test_step)
base.set_eval()
features_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()
with torch.no_grad():
for i, data in enumerate(loaders.rgb_all_loader):
# load data
images, pids, cids, _ = data
images = base.G_rgb2ir(images.to(base.device)).data.cpu()
# forward
features = compute_features(images)
# meter
features_meter.update(features.data)
pids_meter.update(pids.data)
cids_meter.update(cids.data)
for i, data in enumerate(loaders.ir_all_loader):
# load data
images, pids, cids, _ = data
# forward
features = compute_features(images)
# meter
features_meter.update(features.data)
pids_meter.update(pids.data)
cids_meter.update(cids.data)
print('Time:{}. Start to normalize features.'.format(time_now()))
# normalize features
features = features_meter.get_val()
features = normalize_and_resize_feature(features)
features = features.data.cpu().numpy()
pids = pids_meter.get_val_numpy()
cids = cids_meter.get_val_numpy()
print('Time: {}. Note: Start to save features as .mat file'.format(time_now()))
# save features as .mat file
results = {1: XX(), 2: XX(), 3: XX(), 4: XX(), 5: XX(), 6: XX()}
for i in range(features.shape[0]):
feature = features[i, :]
feature = np.resize(feature, [1, feature.shape[0]])
cid, pid = cids[i], pids[i]
results[cid].update(pid, feature)
pid_num_of_cids = [333, 333, 533, 533, 533, 333]
cids = [1, 2, 3, 4, 5, 6]
for cid in cids:
a_result = results[cid]
xx = []
for pid in range(1, 1+ pid_num_of_cids[cid - 1]):
xx.append([a_result.get_val(pid).astype(np.double)])
xx = np.array(xx)
sio.savemat(os.path.join(base.save_features_path, 'feature_cam{}.mat'.format(cid)), {'feature': xx})
def save_images(base, current_step):
    # base.set_eval()
    with torch.no_grad():
        fixed_fake_ir_images = base.G_rgb2ir(base.fixed_real_rgb_images).detach()
        # Stack real RGB, translated (fake) IR, and real IR rows into one grid image
        image_grid = torch.cat([base.fixed_real_rgb_images, fixed_fake_ir_images, base.fixed_real_ir_images], dim=0)
        save_image((image_grid.data.cpu() + 1.0) / 2.0,
                   os.path.join(base.save_images_path, 'image_{}.jpg'.format(current_step)),
                   nrow=base.fixed_real_rgb_images.size(0), padding=0)
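The script above relies on several helpers imported from tools (fliplr, CatMeter, time_now, evaluate_sysymm01) that are not shown in this record. The sketches below of fliplr and CatMeter are assumptions written only to make the feature-extraction loop easier to follow; the project's real implementations may differ.

import torch

def fliplr(images):
    """Horizontally flip a batch of images shaped (N, C, H, W) for flip augmentation."""
    return torch.flip(images, dims=[3])

class CatMeter:
    """Accumulate tensors by concatenating them along the batch dimension."""

    def __init__(self):
        self.val = None

    def update(self, val):
        self.val = val if self.val is None else torch.cat([self.val, val], dim=0)

    def get_val(self):
        return self.val

    def get_val_numpy(self):
        return self.val.data.cpu().numpy()

With meters like these, features_meter.get_val() returns a single tensor of all RGB and IR features stacked along dimension 0; normalize_and_resize_feature then L2-normalizes each part and flattens the result before the per-camera feature_cam1.mat .. feature_cam6.mat files are written.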
|
normal
|
{
"blob_id": "b21796a9e10314f80cac3151d1fdbb139966303f",
"index": 5555,
"step-1": "<mask token>\n\n\ndef test(config, base, loaders, brief):\n compute_and_save_features(base, loaders)\n results = evalutate(config, base, brief)\n return results\n\n\ndef evalutate(config, base, brief=False):\n results = {}\n for mode in config.modes:\n print(mode)\n for number_shot in config.number_shots:\n print(number_shot)\n cmc, map = evaluate_sysymm01(base.save_features_path, mode,\n number_shot)\n results['{},{}'.format(mode, number_shot)] = [cmc, map]\n if brief:\n break\n if brief:\n break\n return results\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test(config, base, loaders, brief):\n compute_and_save_features(base, loaders)\n results = evalutate(config, base, brief)\n return results\n\n\ndef evalutate(config, base, brief=False):\n results = {}\n for mode in config.modes:\n print(mode)\n for number_shot in config.number_shots:\n print(number_shot)\n cmc, map = evaluate_sysymm01(base.save_features_path, mode,\n number_shot)\n results['{},{}'.format(mode, number_shot)] = [cmc, map]\n if brief:\n break\n if brief:\n break\n return results\n\n\ndef compute_and_save_features(base, loaders):\n\n def compute_features(images):\n images_f = fliplr(images)\n images = images.to(base.device)\n images_f = images_f.to(base.device)\n features = base.encoder(base.process_images_4_encoder(images, True,\n True))\n features_f = base.encoder(base.process_images_4_encoder(images_f, \n True, True))\n features, _, _, _ = base.embeder(features)\n features_f, _, _, _ = base.embeder(features_f)\n features = features + features_f\n if base.part_num == 1:\n features = torch.unsqueeze(features, -1)\n return features\n\n def normalize_and_resize_feature(features):\n norm = torch.norm(features, dim=1, keepdim=True)\n features = features / norm.repeat([1, features.size(1), 1])\n features = features.view(features.size(0), -1)\n return features\n\n\n class XX:\n\n def __init__(self):\n self.val = {}\n\n def update(self, key, value):\n if key not in self.val.keys():\n self.val[key] = value\n else:\n self.val[key] = np.concatenate([self.val[key], value], axis=0)\n\n def get_val(self, key):\n if key in self.val.keys():\n return self.val[key]\n else:\n return np.array([[]])\n print('Time:{}. Start to compute features'.format(time_now()))\n base.set_eval()\n features_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()\n with torch.no_grad():\n for i, data in enumerate(loaders.rgb_all_loader):\n images, pids, cids, _ = data\n images = base.G_rgb2ir(images.to(base.device)).data.cpu()\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n for i, data in enumerate(loaders.ir_all_loader):\n images, pids, cids, _ = data\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n print('Time:{}. Start to normalize features.'.format(time_now()))\n features = features_meter.get_val()\n features = normalize_and_resize_feature(features)\n features = features.data.cpu().numpy()\n pids = pids_meter.get_val_numpy()\n cids = cids_meter.get_val_numpy()\n print('Time: {}. Note: Start to save features as .mat file'.format(\n time_now()))\n results = {(1): XX(), (2): XX(), (3): XX(), (4): XX(), (5): XX(), (6): XX()\n }\n for i in range(features.shape[0]):\n feature = features[i, :]\n feature = np.resize(feature, [1, feature.shape[0]])\n cid, pid = cids[i], pids[i]\n results[cid].update(pid, feature)\n pid_num_of_cids = [333, 333, 533, 533, 533, 333]\n cids = [1, 2, 3, 4, 5, 6]\n for cid in cids:\n a_result = results[cid]\n xx = []\n for pid in range(1, 1 + pid_num_of_cids[cid - 1]):\n xx.append([a_result.get_val(pid).astype(np.double)])\n xx = np.array(xx)\n sio.savemat(os.path.join(base.save_features_path,\n 'feature_cam{}.mat'.format(cid)), {'feature': xx})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test(config, base, loaders, brief):\n compute_and_save_features(base, loaders)\n results = evalutate(config, base, brief)\n return results\n\n\ndef evalutate(config, base, brief=False):\n results = {}\n for mode in config.modes:\n print(mode)\n for number_shot in config.number_shots:\n print(number_shot)\n cmc, map = evaluate_sysymm01(base.save_features_path, mode,\n number_shot)\n results['{},{}'.format(mode, number_shot)] = [cmc, map]\n if brief:\n break\n if brief:\n break\n return results\n\n\ndef compute_and_save_features(base, loaders):\n\n def compute_features(images):\n images_f = fliplr(images)\n images = images.to(base.device)\n images_f = images_f.to(base.device)\n features = base.encoder(base.process_images_4_encoder(images, True,\n True))\n features_f = base.encoder(base.process_images_4_encoder(images_f, \n True, True))\n features, _, _, _ = base.embeder(features)\n features_f, _, _, _ = base.embeder(features_f)\n features = features + features_f\n if base.part_num == 1:\n features = torch.unsqueeze(features, -1)\n return features\n\n def normalize_and_resize_feature(features):\n norm = torch.norm(features, dim=1, keepdim=True)\n features = features / norm.repeat([1, features.size(1), 1])\n features = features.view(features.size(0), -1)\n return features\n\n\n class XX:\n\n def __init__(self):\n self.val = {}\n\n def update(self, key, value):\n if key not in self.val.keys():\n self.val[key] = value\n else:\n self.val[key] = np.concatenate([self.val[key], value], axis=0)\n\n def get_val(self, key):\n if key in self.val.keys():\n return self.val[key]\n else:\n return np.array([[]])\n print('Time:{}. Start to compute features'.format(time_now()))\n base.set_eval()\n features_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()\n with torch.no_grad():\n for i, data in enumerate(loaders.rgb_all_loader):\n images, pids, cids, _ = data\n images = base.G_rgb2ir(images.to(base.device)).data.cpu()\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n for i, data in enumerate(loaders.ir_all_loader):\n images, pids, cids, _ = data\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n print('Time:{}. Start to normalize features.'.format(time_now()))\n features = features_meter.get_val()\n features = normalize_and_resize_feature(features)\n features = features.data.cpu().numpy()\n pids = pids_meter.get_val_numpy()\n cids = cids_meter.get_val_numpy()\n print('Time: {}. 
Note: Start to save features as .mat file'.format(\n time_now()))\n results = {(1): XX(), (2): XX(), (3): XX(), (4): XX(), (5): XX(), (6): XX()\n }\n for i in range(features.shape[0]):\n feature = features[i, :]\n feature = np.resize(feature, [1, feature.shape[0]])\n cid, pid = cids[i], pids[i]\n results[cid].update(pid, feature)\n pid_num_of_cids = [333, 333, 533, 533, 533, 333]\n cids = [1, 2, 3, 4, 5, 6]\n for cid in cids:\n a_result = results[cid]\n xx = []\n for pid in range(1, 1 + pid_num_of_cids[cid - 1]):\n xx.append([a_result.get_val(pid).astype(np.double)])\n xx = np.array(xx)\n sio.savemat(os.path.join(base.save_features_path,\n 'feature_cam{}.mat'.format(cid)), {'feature': xx})\n\n\ndef save_images(base, current_step):\n with torch.no_grad():\n fixed_fake_ir_images = base.G_rgb2ir(base.fixed_real_rgb_images\n ).detach()\n xxxx = torch.cat([base.fixed_real_rgb_images, fixed_fake_ir_images,\n base.fixed_real_ir_images], dim=0)\n save_image((xxxx.data.cpu() + 1.0) / 2.0, os.path.join(base.\n save_images_path, 'image_{}.jpg'.format(current_step)), nrow=\n base.fixed_real_rgb_images.size(0), padding=0)\n",
"step-4": "import numpy as np\nimport scipy.io as sio\nimport os\nimport torch\nfrom torchvision.utils import save_image\nfrom tools import *\n\n\ndef test(config, base, loaders, brief):\n compute_and_save_features(base, loaders)\n results = evalutate(config, base, brief)\n return results\n\n\ndef evalutate(config, base, brief=False):\n results = {}\n for mode in config.modes:\n print(mode)\n for number_shot in config.number_shots:\n print(number_shot)\n cmc, map = evaluate_sysymm01(base.save_features_path, mode,\n number_shot)\n results['{},{}'.format(mode, number_shot)] = [cmc, map]\n if brief:\n break\n if brief:\n break\n return results\n\n\ndef compute_and_save_features(base, loaders):\n\n def compute_features(images):\n images_f = fliplr(images)\n images = images.to(base.device)\n images_f = images_f.to(base.device)\n features = base.encoder(base.process_images_4_encoder(images, True,\n True))\n features_f = base.encoder(base.process_images_4_encoder(images_f, \n True, True))\n features, _, _, _ = base.embeder(features)\n features_f, _, _, _ = base.embeder(features_f)\n features = features + features_f\n if base.part_num == 1:\n features = torch.unsqueeze(features, -1)\n return features\n\n def normalize_and_resize_feature(features):\n norm = torch.norm(features, dim=1, keepdim=True)\n features = features / norm.repeat([1, features.size(1), 1])\n features = features.view(features.size(0), -1)\n return features\n\n\n class XX:\n\n def __init__(self):\n self.val = {}\n\n def update(self, key, value):\n if key not in self.val.keys():\n self.val[key] = value\n else:\n self.val[key] = np.concatenate([self.val[key], value], axis=0)\n\n def get_val(self, key):\n if key in self.val.keys():\n return self.val[key]\n else:\n return np.array([[]])\n print('Time:{}. Start to compute features'.format(time_now()))\n base.set_eval()\n features_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()\n with torch.no_grad():\n for i, data in enumerate(loaders.rgb_all_loader):\n images, pids, cids, _ = data\n images = base.G_rgb2ir(images.to(base.device)).data.cpu()\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n for i, data in enumerate(loaders.ir_all_loader):\n images, pids, cids, _ = data\n features = compute_features(images)\n features_meter.update(features.data)\n pids_meter.update(pids.data)\n cids_meter.update(cids.data)\n print('Time:{}. Start to normalize features.'.format(time_now()))\n features = features_meter.get_val()\n features = normalize_and_resize_feature(features)\n features = features.data.cpu().numpy()\n pids = pids_meter.get_val_numpy()\n cids = cids_meter.get_val_numpy()\n print('Time: {}. 
Note: Start to save features as .mat file'.format(\n time_now()))\n results = {(1): XX(), (2): XX(), (3): XX(), (4): XX(), (5): XX(), (6): XX()\n }\n for i in range(features.shape[0]):\n feature = features[i, :]\n feature = np.resize(feature, [1, feature.shape[0]])\n cid, pid = cids[i], pids[i]\n results[cid].update(pid, feature)\n pid_num_of_cids = [333, 333, 533, 533, 533, 333]\n cids = [1, 2, 3, 4, 5, 6]\n for cid in cids:\n a_result = results[cid]\n xx = []\n for pid in range(1, 1 + pid_num_of_cids[cid - 1]):\n xx.append([a_result.get_val(pid).astype(np.double)])\n xx = np.array(xx)\n sio.savemat(os.path.join(base.save_features_path,\n 'feature_cam{}.mat'.format(cid)), {'feature': xx})\n\n\ndef save_images(base, current_step):\n with torch.no_grad():\n fixed_fake_ir_images = base.G_rgb2ir(base.fixed_real_rgb_images\n ).detach()\n xxxx = torch.cat([base.fixed_real_rgb_images, fixed_fake_ir_images,\n base.fixed_real_ir_images], dim=0)\n save_image((xxxx.data.cpu() + 1.0) / 2.0, os.path.join(base.\n save_images_path, 'image_{}.jpg'.format(current_step)), nrow=\n base.fixed_real_rgb_images.size(0), padding=0)\n",
"step-5": "import numpy as np\nimport scipy.io as sio\nimport os\n\nimport torch\nfrom torchvision.utils import save_image\n\nfrom tools import *\n\n\n\ndef test(config, base, loaders, brief):\n\n\tcompute_and_save_features(base, loaders)\n\tresults = evalutate(config, base, brief)\n\treturn results\n\n\ndef evalutate(config, base, brief=False):\n\n\tresults = {}\n\tfor mode in config.modes:\n\t\tprint(mode)\n\t\tfor number_shot in config.number_shots:\n\t\t\tprint(number_shot)\n\t\t\tcmc, map = evaluate_sysymm01(base.save_features_path, mode, number_shot)\n\t\t\tresults['{},{}'.format(mode, number_shot)] = [cmc, map]\n\t\t\tif brief: break\n\t\tif brief: break\n\n\treturn results\n\n\ndef compute_and_save_features(base, loaders):\n\n\tdef compute_features(images):\n\t\timages_f = fliplr(images)\n\t\timages = images.to(base.device)\n\t\timages_f = images_f.to(base.device)\n\t\tfeatures = base.encoder(base.process_images_4_encoder(images, True, True))\n\t\tfeatures_f = base.encoder(base.process_images_4_encoder(images_f, True, True))\n\t\tfeatures, _, _, _ = base.embeder(features)\n\t\tfeatures_f, _, _, _ = base.embeder(features_f)\n\t\tfeatures = features + features_f\n\t\tif base.part_num == 1:\n\t\t\tfeatures = torch.unsqueeze(features, -1)\n\t\treturn features\n\n\tdef normalize_and_resize_feature(features):\n\t\t# normlize\n\t\tnorm = torch.norm(features, dim=1, keepdim=True)\n\t\tfeatures = features / norm.repeat([1, features.size(1), 1])\n\t\t# resize\n\t\tfeatures = features.view(features.size(0), -1)\n\t\treturn features\n\n\tclass XX:\n\t\tdef __init__(self):\n\t\t\tself.val = {}\n\t\tdef update(self, key, value):\n\t\t\tif key not in self.val.keys():\n\t\t\t\tself.val[key] = value\n\t\t\telse:\n\t\t\t\tself.val[key] = np.concatenate([self.val[key], value], axis=0)\n\t\tdef get_val(self, key):\n\t\t\tif key in self.val.keys():\n\t\t\t\treturn self.val[key]\n\t\t\telse:\n\t\t\t\treturn np.array([[]])\n\n\n\tprint('Time:{}. Start to compute features'.format(time_now()))\n\t# compute features\n\t# base._resume_model(test_step)\n\tbase.set_eval()\n\tfeatures_meter, pids_meter, cids_meter = CatMeter(), CatMeter(), CatMeter()\n\n\twith torch.no_grad():\n\t\tfor i, data in enumerate(loaders.rgb_all_loader):\n\t\t\t# load data\n\t\t\timages, pids, cids, _ = data\n\t\t\timages = base.G_rgb2ir(images.to(base.device)).data.cpu()\n\t\t\t# forward\n\t\t\tfeatures = compute_features(images)\n\t\t\t# meter\n\t\t\tfeatures_meter.update(features.data)\n\t\t\tpids_meter.update(pids.data)\n\t\t\tcids_meter.update(cids.data)\n\n\t\tfor i, data in enumerate(loaders.ir_all_loader):\n\t\t\t# load data\n\t\t\timages, pids, cids, _ = data\n\t\t\t# forward\n\t\t\tfeatures = compute_features(images)\n\t\t\t# meter\n\t\t\tfeatures_meter.update(features.data)\n\t\t\tpids_meter.update(pids.data)\n\t\t\tcids_meter.update(cids.data)\n\n\tprint('Time:{}. Start to normalize features.'.format(time_now()))\n\t# normalize features\n\tfeatures = features_meter.get_val()\n\tfeatures = normalize_and_resize_feature(features)\n\tfeatures = features.data.cpu().numpy()\n\tpids = pids_meter.get_val_numpy()\n\tcids = cids_meter.get_val_numpy()\n\n\tprint('Time: {}. 
Note: Start to save features as .mat file'.format(time_now()))\n\t# save features as .mat file\n\tresults = {1: XX(), 2: XX(), 3: XX(), 4: XX(), 5: XX(), 6: XX()}\n\tfor i in range(features.shape[0]):\n\t\tfeature = features[i, :]\n\t\tfeature = np.resize(feature, [1, feature.shape[0]])\n\t\tcid, pid = cids[i], pids[i]\n\t\tresults[cid].update(pid, feature)\n\n\tpid_num_of_cids = [333, 333, 533, 533, 533, 333]\n\tcids = [1, 2, 3, 4, 5, 6]\n\tfor cid in cids:\n\t\ta_result = results[cid]\n\t\txx = []\n\t\tfor pid in range(1, 1+ pid_num_of_cids[cid - 1]):\n\t\t\txx.append([a_result.get_val(pid).astype(np.double)])\n\t\txx = np.array(xx)\n\t\tsio.savemat(os.path.join(base.save_features_path, 'feature_cam{}.mat'.format(cid)), {'feature': xx})\n\n\n\ndef save_images(base, current_step):\n\n\t#base.set_eval()\n\twith torch.no_grad():\n\t\tfixed_fake_ir_images = base.G_rgb2ir(base.fixed_real_rgb_images).detach()\n\t\txxxx = torch.cat([base.fixed_real_rgb_images, fixed_fake_ir_images, base.fixed_real_ir_images], dim=0)\n\t\tsave_image((xxxx.data.cpu() + 1.0) / 2.0,\n\t\t os.path.join(base.save_images_path, 'image_{}.jpg'.format(current_step)), nrow=base.fixed_real_rgb_images.size(0), padding=0)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |