code (string, lengths 13-1.2M) | order_type (string, 1 value) | original_example (dict) | step_ids (list, lengths 1-5)
---|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 21:03:43 2019
@author: 00124175
"""
"""
读取txt文件
该文本中的分割符既有空格又有制表符('/t'),sep参数用'/s+',可以匹配任何空格。
"""
#header=None:没有每列的column name,可以自己设定
#encoding='gb2312':其他编码中文显示错误
#sep=',':用逗号来分隔每行的数据
#index_col=0:设置第1列数据作为index
import pandas as pd
data = pd.read_table("1206sjl.txt",header=None,encoding='gb2312',sep='|',skiprows=1)
data1 = pd.read_table("1206sjl.txt",header=None,encoding='gb2312',sep='|',nrows=1)
cols_name = data1.iloc[:,0:80]
mydata = data.iloc[:,0:80]#all rows, columns 0-79
cols_name = cols_name.values.tolist()#convert to a list
mydata.columns = cols_name#attach the column names
mydata.rename(columns=lambda x: x.strip(' '),inplace=True)#strip leading/trailing spaces from the column names
mydata[['__lat,__deg','__lon,__deg']] = mydata[['__lat,__deg','__lon,__deg']].apply(pd.to_numeric)
my_need_data = mydata[(mydata['__lat,__deg']>39.14) & (mydata['__lat,__deg']<39.17)&(mydata['__lon,__deg']>117.51)&(mydata['__lon,__deg']<117.53)]
print(my_need_data.iloc[:,0:3])
my_need_data.to_csv("result_csv.csv", index=0)
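#The docstring above mentions files whose separators are a mix of spaces and tabs;
#a minimal sketch of that variant follows. The file name "whitespace_sample.txt" is
#only an illustrative placeholder (not part of the original data), so the call is
#left commented out. sep=r'\s+' is a regex that matches any run of whitespace.
#ws_data = pd.read_table("whitespace_sample.txt", header=None, encoding='gb2312', sep=r'\s+')
#print(ws_data.head())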
|
normal
|
{
"blob_id": "ab760ec4cbb9f616f38b0f0f2221987460c6f618",
"index": 6492,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmydata.rename(columns=lambda x: x.strip(' '), inplace=True)\n<mask token>\nprint(my_need_data.iloc[:, 0:3])\nmy_need_data.to_csv('result_csv.csv', index=0)\n",
"step-3": "<mask token>\ndata = pd.read_table('1206sjl.txt', header=None, encoding='gb2312', sep='|',\n skiprows=1)\ndata1 = pd.read_table('1206sjl.txt', header=None, encoding='gb2312', sep=\n '|', nrows=1)\ncols_name = data1.iloc[:, 0:80]\nmydata = data.iloc[:, 0:80]\ncols_name = cols_name.values.tolist()\nmydata.columns = cols_name\nmydata.rename(columns=lambda x: x.strip(' '), inplace=True)\nmydata[['__lat,__deg', '__lon,__deg']] = mydata[['__lat,__deg', '__lon,__deg']\n ].apply(pd.to_numeric)\nmy_need_data = mydata[(mydata['__lat,__deg'] > 39.14) & (mydata[\n '__lat,__deg'] < 39.17) & (mydata['__lon,__deg'] > 117.51) & (mydata[\n '__lon,__deg'] < 117.53)]\nprint(my_need_data.iloc[:, 0:3])\nmy_need_data.to_csv('result_csv.csv', index=0)\n",
"step-4": "<mask token>\nimport pandas as pd\ndata = pd.read_table('1206sjl.txt', header=None, encoding='gb2312', sep='|',\n skiprows=1)\ndata1 = pd.read_table('1206sjl.txt', header=None, encoding='gb2312', sep=\n '|', nrows=1)\ncols_name = data1.iloc[:, 0:80]\nmydata = data.iloc[:, 0:80]\ncols_name = cols_name.values.tolist()\nmydata.columns = cols_name\nmydata.rename(columns=lambda x: x.strip(' '), inplace=True)\nmydata[['__lat,__deg', '__lon,__deg']] = mydata[['__lat,__deg', '__lon,__deg']\n ].apply(pd.to_numeric)\nmy_need_data = mydata[(mydata['__lat,__deg'] > 39.14) & (mydata[\n '__lat,__deg'] < 39.17) & (mydata['__lon,__deg'] > 117.51) & (mydata[\n '__lon,__deg'] < 117.53)]\nprint(my_need_data.iloc[:, 0:3])\nmy_need_data.to_csv('result_csv.csv', index=0)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 18 21:03:43 2019\n\n@author: 00124175\n\"\"\"\n\n\"\"\"\n读取txt文件\n该文本中的分割符既有空格又有制表符('/t'),sep参数用'/s+',可以匹配任何空格。\n\"\"\"\n#header=None:没有每列的column name,可以自己设定\n#encoding='gb2312':其他编码中文显示错误\n#sep=',':用逗号来分隔每行的数据\n#index_col=0:设置第1列数据作为index\nimport pandas as pd\ndata = pd.read_table(\"1206sjl.txt\",header=None,encoding='gb2312',sep='|',skiprows=1)\ndata1 = pd.read_table(\"1206sjl.txt\",header=None,encoding='gb2312',sep='|',nrows=1)\n\ncols_name = data1.iloc[:,0:80]\nmydata = data.iloc[:,0:80]#读所有的行,0-79列\ncols_name = cols_name.values.tolist()#转换为list\nmydata.columns = cols_name#加上列名称\nmydata.rename(columns=lambda x: x.strip(' '),inplace=True)#去掉dataframe中的前后空格\nmydata[['__lat,__deg','__lon,__deg']] = mydata[['__lat,__deg','__lon,__deg']].apply(pd.to_numeric)\n\nmy_need_data = mydata[(mydata['__lat,__deg']>39.14) & (mydata['__lat,__deg']<39.17)&(mydata['__lon,__deg']>117.51)&(mydata['__lon,__deg']<117.53)]\nprint(my_need_data.iloc[:,0:3])\nmy_need_data.to_csv(\"result_csv.csv\", index=0)\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import pathlib
from blastsight.view.viewer import Viewer
"""
In this demo, we'll show how you can create a basic animation.
An animation is interpreted as changing the state of the viewer one frame at a time.
That means we'll define a function that makes a change in one single frame.
The function must receive a single argument, of the same type as the 'start' and 'end' values.
"""
v = Viewer()
path = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'
mesh = v.load_mesh(path, highlight=True)
def autorotate(angle):
v.set_rotation_angle([0.0, -angle, 0.0])
"""
The animate() method receives a 'start' value, an 'end' value, a 'method' (the function that changes
one frame in the viewer), and two optional kwargs: 'milliseconds' (how long the animation should
last) and 'steps' (the smoothness of the animation depends on this).
"""
# Start animation
v.animate(0, 360, autorotate, milliseconds=3000, steps=100)
# Show viewer
v.show()
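"""
A small variation on the demo above (a sketch only, reusing the same two calls this
demo already makes: set_rotation_angle() and animate()). Any function that takes a
single value can drive a frame; tilting the mesh about the X axis instead would look
like this, with the commented call swapped in for the autorotate animation above.
"""
def autotilt(angle):
    v.set_rotation_angle([-angle, 0.0, 0.0])
# v.animate(0, 90, autotilt, milliseconds=1500, steps=50)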
|
normal
|
{
"blob_id": "00be3d813ce4335ff9ea02ed9f1884d3210f3d5a",
"index": 3101,
"step-1": "<mask token>\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n<mask token>\nv.animate(0, 360, autorotate, milliseconds=3000, steps=100)\nv.show()\n",
"step-3": "<mask token>\nv = Viewer()\npath = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'\nmesh = v.load_mesh(path, highlight=True)\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n<mask token>\nv.animate(0, 360, autorotate, milliseconds=3000, steps=100)\nv.show()\n",
"step-4": "import pathlib\nfrom blastsight.view.viewer import Viewer\n<mask token>\nv = Viewer()\npath = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'\nmesh = v.load_mesh(path, highlight=True)\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n<mask token>\nv.animate(0, 360, autorotate, milliseconds=3000, steps=100)\nv.show()\n",
"step-5": "#!/usr/bin/env python\n\nimport pathlib\n\nfrom blastsight.view.viewer import Viewer\n\n\"\"\"\nIn this demo, we'll show how you can create a basic animation.\n\nAn animation is interpreted as changing the state of the viewer one frame at the time.\nThat means we'll define a function that makes a change in one single frame.\nThe function must receive a single argument, of the same type of the 'start' and 'end' values.\n\"\"\"\n\nv = Viewer()\npath = f'{pathlib.Path(__file__).parent.parent}/test_files/caseron.off'\nmesh = v.load_mesh(path, highlight=True)\n\n\ndef autorotate(angle):\n v.set_rotation_angle([0.0, -angle, 0.0])\n\n\n\"\"\"\nThe animate() method receives a 'start' value, an 'end' value, a 'method' (the function that changes\none frame in the viewer), and two optional kwargs: 'milliseconds' (how much time should the\nanimation last) and 'steps' (smoothness of the animation depends on this).\n\"\"\"\n\n# Start animation\nv.animate(0, 360, autorotate, milliseconds=3000, steps=100)\n\n# Show viewer\nv.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
##############################
# SConscript for OgreOpcode #
##############################
#SCons scripts maintained by:
# Van Aarde "nanocell" Krynauw.
#TODO:
# - Add commandline options to specify include dirs, defines, compiler defs, libraries, etc.
# - Add Sconscripts for the samples.
# - Add a binary SConstruct file.
#####################
# Options #
#####################
OPT_INCLUDES = '''#include
/usr/local/include/OGRE
'''
OPT_FLAGS = ' '
OPT_LIBNAME = 'OgreOpcode'
OPT_SUBDIRS = 'src'
OPT_CC = 'g++'
OPT_FLAGS = '-Wall -ansi'
OPT_LIBTARGET = '#/libs'
####################
import sys, os
################################
# Some helper functions #
################################
def BuildSharedLib(environment, name, objs):
output = env['LIBTARGET'] + '/' + name
environment.SharedLibrary(output, objs)
def CreateAppFromObjs(objs):
output = '#' + OPT_APPNAME
environment.Program( output, objs, duplicate=0)
#################################
platform_build = 'build' + os.path.sep + sys.platform
#Set up a building environment
env = Environment()
env.BuildSharedLib = BuildSharedLib
env.CreateAppFromObjs = CreateAppFromObjs
#Check if OgreOpcode objects should be built as static or shared
env.BuildObject = env.SharedObject
#Check if OgreOpcode is being compiled on linux. If so,
#then define LINUX_FLOAT
if sys.platform == "linux2":
OPT_FLAGS = OPT_FLAGS + ' -DLINUX_FLOAT'
env.Replace(CC = OPT_CC)
env.Replace(CXXFLAGS = Split(OPT_FLAGS))
#Current path. Used for relative hierarchical building
env['CURRENT_PATH'] = '#'
#The build directory for the current
#platform. TODO: Add command line
#support for cross compiling.
env['PLATFORM_BUILD'] = platform_build
#Target directory where the libraries
#will be built.
env['LIBTARGET'] = OPT_LIBTARGET
env.Append(CPPPATH = Split(OPT_INCLUDES))
#env.Append(LIBPATH = Split(LIBSDIR))
Export('env')
print "Building to: " + env['PLATFORM_BUILD']
#TODO: Check for dependencies...?
objs = []
#Map the 'build' subdirectory to be compile from 'src'
#Run all the SConscripts in sub directories.
for subdir in Split(OPT_SUBDIRS):
#Map a given subdirectory into the build directory...Let's see how this goes.
env.BuildDir( env['PLATFORM_BUILD'] + "/" + subdir, subdir, duplicate=0)
o = env.SConscript( env['PLATFORM_BUILD'] + "/" + subdir + '/SConscript')
#o = senv.SConscript( senv['PLATFORM_BUILD'] + '/' + subdir + '/SConscript')
objs.append(o)
#All the objects that were returned should be compiled
#into the final OgreOpcode library
#o = env.SConscript( env['PLATFORM_BUILD'] + os.path.sep + 'SConscript')
env.SharedLibrary(OPT_LIBTARGET + "/" + OPT_LIBNAME, o);
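################################
# Example sub-SConscript       #
################################
#A minimal sketch of what the 'src/SConscript' consumed by the loop above might
#contain (the *.cpp glob is a placeholder; the real source list is not part of this
#file, so the lines stay commented out). It imports the shared environment, compiles
#the sources with the BuildObject alias defined above, and hands the objects back to
#the calling script:
#
#Import('env')
#objs = env.BuildObject(Glob('*.cpp'))
#Return('objs')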
|
normal
|
{
"blob_id": "ed1df078ad2e8d770f3d8c41493b5537ed106e3a",
"index": 1954,
"step-1": "##############################\n# SConscript for OgreOpcode #\n##############################\n#SCons scripts maintained by:\n# Van Aarde \"nanocell\" Krynauw.\n\n#TODO:\n# - Add commandline options to specify include dirs, defines, compiler defs, libraries, etc.\n# - Add Sconscripts for the samples.\n# - Add a binary SConstruct file.\n\n#####################\n# Options #\n#####################\n\nOPT_INCLUDES =\t'''#include\n\t\t\t\t\t\t\t\t\t /usr/local/include/OGRE\n\t\t\t\t\t\t\t\t'''\n\nOPT_FLAGS = ' '\nOPT_LIBNAME = 'OgreOpcode'\nOPT_SUBDIRS = 'src'\nOPT_CC = 'g++'\nOPT_FLAGS = '-Wall -ansi'\nOPT_LIBTARGET = '#/libs'\n\n####################\n\nimport sys, os\n\n################################\n# Some helper functions #\n################################\ndef BuildSharedLib(environment, name, objs):\n\toutput = env['LIBTARGET'] + '/' + name\n\tenvironment.SharedLibrary(output, objs)\n\ndef CreateAppFromObjs(objs):\n\toutput = '#' + OPT_APPNAME\n\tenvironment.Program( output, objs, duplicate=0)\n#################################\n\nplatform_build = 'build' + os.path.sep + sys.platform\n\n#Set up a building environment\n\nenv = Environment()\n\nenv.BuildSharedLib = BuildSharedLib\nenv.CreateAppFromObjs = CreateAppFromObjs\n\n#Check if OgreOpcode objects should be built as static or shared\nenv.BuildObject = env.SharedObject\n\n#Check if OgreOpcode is being compiled on linux. If so,\n#then define LINUX_FLOAT\n\nif sys.platform == \"linux2\":\n\tOPT_FLAGS = OPT_FLAGS + ' -DLINUX_FLOAT'\n\nenv.Replace(CC = OPT_CC)\nenv.Replace(CXXFLAGS = Split(OPT_FLAGS))\n\n#Current path. Used for relative hierarchical building\nenv['CURRENT_PATH'] = '#'\n\n#The build directory for the current \n#platform. TODO: Add command line\n#support for cross compiling.\nenv['PLATFORM_BUILD'] = platform_build\n#Target directory where the libaries\n#will be built.\nenv['LIBTARGET'] = OPT_LIBTARGET\n\nenv.Append(CPPPATH = Split(OPT_INCLUDES))\n\n#env.Append(LIBPATH = Split(LIBSDIR))\n\nExport('env')\n\nprint \"Building to: \" + env['PLATFORM_BUILD']\n\n#TODO: Check for dependencies...?\n\nobjs = []\n\n#Map the 'build' subdirectory to be compile from 'src'\n\n#Run all the SConscripts in sub directories.\nfor subdir in Split(OPT_SUBDIRS):\n\t#Map a given subdirectory into the build directory...Let's see how this goes.\n\tenv.BuildDir( env['PLATFORM_BUILD'] + \"/\" + subdir, subdir, duplicate=0)\n\to = env.SConscript( env['PLATFORM_BUILD'] + \"/\" + subdir + '/SConscript')\n\t#o = senv.SConscript( senv['PLATFORM_BUILD'] + '/' + subdir + '/SConscript')\n\tobjs.append(o)\n\n#All the objects that were returned should be compiled \n#into the final OgreOpcode library\n#o = env.SConscript( env['PLATFORM_BUILD'] + os.path.sep + 'SConscript')\n\nenv.SharedLibrary(OPT_LIBTARGET + \"/\" + OPT_LIBNAME, o);\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import gc
import network
import lib.gate as gate
import time
from micropython import const
from ubluetooth import BLE
import lib.webserver as webserver
bt = BLE()
bt.active(True)
_IRQ_SCAN_RESULT = const(5)
_IRQ_SCAN_DONE = const(6)
def byteToMac(addr):
m = memoryview(addr)
a = "{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}".format(m[0],m[1],m[2],m[3],m[4],m[5])
return a
def handler(event, data):
if event == _IRQ_SCAN_RESULT:
# A single scan result.
addr_type, addr, adv_type, rssi, adv_data = data
print(addr_type,memoryview(addr) , adv_type, rssi,memoryview( adv_data))
for i in addr:
print("{0:x}".format(i))
print(byteToMac(addr))
if addr == memoryview(bytearray(b'\x40\xe8\xe7\x85\x3d\xed')):
print("device found")
elif event == _IRQ_SCAN_DONE:
# Scan duration finished or manually stopped.
print("scan complete")
pass
def onAdd(addBT):
memoryview(addBT)
def onDelete(delBT):
print("onDelete")
bt.irq(handler)
ap = network.WLAN(network.AP_IF)
ap.active(True)
ap.config(essid="Test", password="1234",authmode= 0)
s = webserver.webserverstart()
lastscan = 0
while True:
webserver.webserver(s, onAdd, onDelete)
print("scanning soon")
if time.time() - lastscan > 10:
print("scanning now...")
bt.gap_scan(10000)
lastscan = time.time()
|
normal
|
{
"blob_id": "99c60befed32a9aa80b6e66b682d9f475e05a8d1",
"index": 2562,
"step-1": "<mask token>\n\n\ndef handler(event, data):\n if event == _IRQ_SCAN_RESULT:\n addr_type, addr, adv_type, rssi, adv_data = data\n print(addr_type, memoryview(addr), adv_type, rssi, memoryview(adv_data)\n )\n for i in addr:\n print('{0:x}'.format(i))\n print(byteToMac(addr))\n if addr == memoryview(bytearray(b'@\\xe8\\xe7\\x85=\\xed')):\n print('device found')\n elif event == _IRQ_SCAN_DONE:\n print('scan complete')\n pass\n\n\n<mask token>\n\n\ndef onDelete(delBT):\n print('onDelete')\n\n\n<mask token>\n",
"step-2": "<mask token>\nbt.active(True)\n<mask token>\n\n\ndef byteToMac(addr):\n m = memoryview(addr)\n a = '{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}'.format(m[0], m[1],\n m[2], m[3], m[4], m[5])\n return a\n\n\ndef handler(event, data):\n if event == _IRQ_SCAN_RESULT:\n addr_type, addr, adv_type, rssi, adv_data = data\n print(addr_type, memoryview(addr), adv_type, rssi, memoryview(adv_data)\n )\n for i in addr:\n print('{0:x}'.format(i))\n print(byteToMac(addr))\n if addr == memoryview(bytearray(b'@\\xe8\\xe7\\x85=\\xed')):\n print('device found')\n elif event == _IRQ_SCAN_DONE:\n print('scan complete')\n pass\n\n\ndef onAdd(addBT):\n memoryview(addBT)\n\n\ndef onDelete(delBT):\n print('onDelete')\n\n\nbt.irq(handler)\n<mask token>\nap.active(True)\nap.config(essid='Test', password='1234', authmode=0)\n<mask token>\nwhile True:\n webserver.webserver(s, onAdd, onDelete)\n print('scanning soon')\n if time.time() - lastscan > 10:\n print('scanning now...')\n bt.gap_scan(10000)\n lastscan = time.time()\n",
"step-3": "<mask token>\nbt = BLE()\nbt.active(True)\n_IRQ_SCAN_RESULT = const(5)\n_IRQ_SCAN_DONE = const(6)\n\n\ndef byteToMac(addr):\n m = memoryview(addr)\n a = '{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}'.format(m[0], m[1],\n m[2], m[3], m[4], m[5])\n return a\n\n\ndef handler(event, data):\n if event == _IRQ_SCAN_RESULT:\n addr_type, addr, adv_type, rssi, adv_data = data\n print(addr_type, memoryview(addr), adv_type, rssi, memoryview(adv_data)\n )\n for i in addr:\n print('{0:x}'.format(i))\n print(byteToMac(addr))\n if addr == memoryview(bytearray(b'@\\xe8\\xe7\\x85=\\xed')):\n print('device found')\n elif event == _IRQ_SCAN_DONE:\n print('scan complete')\n pass\n\n\ndef onAdd(addBT):\n memoryview(addBT)\n\n\ndef onDelete(delBT):\n print('onDelete')\n\n\nbt.irq(handler)\nap = network.WLAN(network.AP_IF)\nap.active(True)\nap.config(essid='Test', password='1234', authmode=0)\ns = webserver.webserverstart()\nlastscan = 0\nwhile True:\n webserver.webserver(s, onAdd, onDelete)\n print('scanning soon')\n if time.time() - lastscan > 10:\n print('scanning now...')\n bt.gap_scan(10000)\n lastscan = time.time()\n",
"step-4": "import gc\nimport network\nimport lib.gate as gate\nimport time\nfrom micropython import const\nfrom ubluetooth import BLE\nimport lib.webserver as webserver\nbt = BLE()\nbt.active(True)\n_IRQ_SCAN_RESULT = const(5)\n_IRQ_SCAN_DONE = const(6)\n\n\ndef byteToMac(addr):\n m = memoryview(addr)\n a = '{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}'.format(m[0], m[1],\n m[2], m[3], m[4], m[5])\n return a\n\n\ndef handler(event, data):\n if event == _IRQ_SCAN_RESULT:\n addr_type, addr, adv_type, rssi, adv_data = data\n print(addr_type, memoryview(addr), adv_type, rssi, memoryview(adv_data)\n )\n for i in addr:\n print('{0:x}'.format(i))\n print(byteToMac(addr))\n if addr == memoryview(bytearray(b'@\\xe8\\xe7\\x85=\\xed')):\n print('device found')\n elif event == _IRQ_SCAN_DONE:\n print('scan complete')\n pass\n\n\ndef onAdd(addBT):\n memoryview(addBT)\n\n\ndef onDelete(delBT):\n print('onDelete')\n\n\nbt.irq(handler)\nap = network.WLAN(network.AP_IF)\nap.active(True)\nap.config(essid='Test', password='1234', authmode=0)\ns = webserver.webserverstart()\nlastscan = 0\nwhile True:\n webserver.webserver(s, onAdd, onDelete)\n print('scanning soon')\n if time.time() - lastscan > 10:\n print('scanning now...')\n bt.gap_scan(10000)\n lastscan = time.time()\n",
"step-5": "import gc\r\nimport network\r\nimport lib.gate as gate\r\nimport time\r\nfrom micropython import const\r\nfrom ubluetooth import BLE\r\nimport lib.webserver as webserver\r\n\r\nbt = BLE()\r\nbt.active(True)\r\n\r\n_IRQ_SCAN_RESULT = const(5)\r\n_IRQ_SCAN_DONE = const(6)\r\n\r\ndef byteToMac(addr):\r\n m = memoryview(addr)\r\n a = \"{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}:{:0>2X}\".format(m[0],m[1],m[2],m[3],m[4],m[5])\r\n return a \r\n\r\ndef handler(event, data):\r\n if event == _IRQ_SCAN_RESULT:\r\n # A single scan result.\r\n addr_type, addr, adv_type, rssi, adv_data = data\r\n print(addr_type,memoryview(addr) , adv_type, rssi,memoryview( adv_data))\r\n for i in addr:\r\n print(\"{0:x}\".format(i))\r\n \r\n print(byteToMac(addr))\r\n if addr == memoryview(bytearray(b'\\x40\\xe8\\xe7\\x85\\x3d\\xed')):\r\n print(\"device found\")\r\n elif event == _IRQ_SCAN_DONE:\r\n # Scan duration finished or manually stopped.\r\n print(\"scan complete\")\r\n pass\r\n\r\ndef onAdd(addBT):\r\n memoryview(addBT)\r\n\r\ndef onDelete(delBT):\r\n print(\"onDelete\")\r\n\r\nbt.irq(handler)\r\n\r\nap = network.WLAN(network.AP_IF)\r\nap.active(True)\r\nap.config(essid=\"Test\", password=\"1234\",authmode= 0)\r\n\r\ns = webserver.webserverstart()\r\n\r\nlastscan = 0\r\nwhile True:\r\n webserver.webserver(s, onAdd, onDelete)\r\n print(\"scanning soon\")\r\n if time.time() - lastscan > 10:\r\n print(\"scanning now...\")\r\n bt.gap_scan(10000) \r\n lastscan = time.time()\r\n \r\n\r\n \r\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
from flask import request,Flask, render_template
from bs4 import BeautifulSoup as bs
from urllib.request import Request,urlopen
import re
app = Flask(__name__)
@app.route('/')
def addRegion():
return render_template('Website WordCount.html')
@app.route('/output_data', methods=['POST','GET'])
def output_data():
    unique_links=[]
    out_arr=[]
if request.method == 'POST':
url = request.form['url']
main = re.sub(r"([\w:///.]+com|info|in|org)([\w///?/=/&/_-]*)",r"\1",url,0, re.MULTILINE | re.UNICODE | re.IGNORECASE)
req =Request(main, headers={'User-Agent' : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30"})
sample=urlopen(req)
soap=bs(sample,"lxml")
for data in soap.find_all('a', href=True):
links=data['href']
links=links if links.startswith(main) else (str(main)+str(links) if links.startswith( '/' ) else str(main)+"/"+str(links))
if(links in unique_links):
continue
unique_links.append(links)
req =Request(links, headers={'User-Agent' : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30"})
sample1=urlopen(req)
soap1=bs(sample1,"lxml")
[x.extract() for x in soap1.findAll(['script', 'style'])]
data=soap1.text
stri=re.sub('[.,/!"@:+*&^%~#=-_]','',data)
stri=stri.split()
num_word=len(stri)
if(num_word<5):
continue
            link_len={'link':links,'wordCount':num_word}#fresh dict per link so earlier entries are not overwritten
            out_arr.append(link_len)
print(out_arr)
return(out_arr)
if __name__ == '__main__':
app.run(debug = True,host='192.168.43.164')
|
normal
|
{
"blob_id": "11dfb09286b8a5742550b5300c776ed82e69ead5",
"index": 2577,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\[email protected]('/output_data', methods=['POST', 'GET'])\ndef output_data():\n unique_links = []\n link_len = {}\n out_arr = []\n if request.method == 'POST':\n url = request.form['url']\n main = re.sub('([\\\\w:///.]+com|info|in|org)([\\\\w///?/=/&/_-]*)',\n '\\\\1', url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)\n req = Request(main, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample = urlopen(req)\n soap = bs(sample, 'lxml')\n for data in soap.find_all('a', href=True):\n links = data['href']\n links = links if links.startswith(main) else str(main) + str(links\n ) if links.startswith('/') else str(main) + '/' + str(links)\n if links in unique_links:\n continue\n unique_links.append(links)\n req = Request(links, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample1 = urlopen(req)\n soap1 = bs(sample1, 'lxml')\n [x.extract() for x in soap1.findAll(['script', 'style'])]\n data = soap1.text\n stri = re.sub('[.,/!\"@:+*&^%~#=-_]', '', data)\n stri = stri.split()\n num_word = len(stri)\n if num_word < 5:\n continue\n link_len['link'] = links\n link_len['wordCount'] = num_word\n out_arr.append(link_len)\n print(out_arr)\n return out_arr\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='192.168.43.164')\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\[email protected]('/output_data', methods=['POST', 'GET'])\ndef output_data():\n unique_links = []\n link_len = {}\n out_arr = []\n if request.method == 'POST':\n url = request.form['url']\n main = re.sub('([\\\\w:///.]+com|info|in|org)([\\\\w///?/=/&/_-]*)',\n '\\\\1', url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)\n req = Request(main, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample = urlopen(req)\n soap = bs(sample, 'lxml')\n for data in soap.find_all('a', href=True):\n links = data['href']\n links = links if links.startswith(main) else str(main) + str(links\n ) if links.startswith('/') else str(main) + '/' + str(links)\n if links in unique_links:\n continue\n unique_links.append(links)\n req = Request(links, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample1 = urlopen(req)\n soap1 = bs(sample1, 'lxml')\n [x.extract() for x in soap1.findAll(['script', 'style'])]\n data = soap1.text\n stri = re.sub('[.,/!\"@:+*&^%~#=-_]', '', data)\n stri = stri.split()\n num_word = len(stri)\n if num_word < 5:\n continue\n link_len['link'] = links\n link_len['wordCount'] = num_word\n out_arr.append(link_len)\n print(out_arr)\n return out_arr\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='192.168.43.164')\n",
"step-4": "from flask import request, Flask, render_template\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.request import Request, urlopen\nimport re\napp = Flask(__name__)\n\n\[email protected]('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\[email protected]('/output_data', methods=['POST', 'GET'])\ndef output_data():\n unique_links = []\n link_len = {}\n out_arr = []\n if request.method == 'POST':\n url = request.form['url']\n main = re.sub('([\\\\w:///.]+com|info|in|org)([\\\\w///?/=/&/_-]*)',\n '\\\\1', url, 0, re.MULTILINE | re.UNICODE | re.IGNORECASE)\n req = Request(main, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample = urlopen(req)\n soap = bs(sample, 'lxml')\n for data in soap.find_all('a', href=True):\n links = data['href']\n links = links if links.startswith(main) else str(main) + str(links\n ) if links.startswith('/') else str(main) + '/' + str(links)\n if links in unique_links:\n continue\n unique_links.append(links)\n req = Request(links, headers={'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30'\n })\n sample1 = urlopen(req)\n soap1 = bs(sample1, 'lxml')\n [x.extract() for x in soap1.findAll(['script', 'style'])]\n data = soap1.text\n stri = re.sub('[.,/!\"@:+*&^%~#=-_]', '', data)\n stri = stri.split()\n num_word = len(stri)\n if num_word < 5:\n continue\n link_len['link'] = links\n link_len['wordCount'] = num_word\n out_arr.append(link_len)\n print(out_arr)\n return out_arr\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='192.168.43.164')\n",
"step-5": "from flask import request,Flask, render_template\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.request import Request,urlopen\nimport re\n\napp = Flask(__name__)\[email protected]('/')\ndef addRegion():\n return render_template('Website WordCount.html')\n\n\[email protected]('/output_data', methods=['POST','GET'])\n\ndef output_data():\n unique_links=[]\n link_len={}\n out_arr=[]\n if request.method == 'POST':\n url = request.form['url']\n main = re.sub(r\"([\\w:///.]+com|info|in|org)([\\w///?/=/&/_-]*)\",r\"\\1\",url,0, re.MULTILINE | re.UNICODE | re.IGNORECASE)\n req =Request(main, headers={'User-Agent' : \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30\"})\n sample=urlopen(req)\n soap=bs(sample,\"lxml\")\n for data in soap.find_all('a', href=True):\n links=data['href']\n links=links if links.startswith(main) else (str(main)+str(links) if links.startswith( '/' ) else str(main)+\"/\"+str(links))\n if(links in unique_links):\n continue\n unique_links.append(links)\n req =Request(links, headers={'User-Agent' : \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30\"})\n sample1=urlopen(req)\n soap1=bs(sample1,\"lxml\")\n [x.extract() for x in soap1.findAll(['script', 'style'])]\n data=soap1.text\n stri=re.sub('[.,/!\"@:+*&^%~#=-_]','',data)\n stri=stri.split()\n num_word=len(stri)\n if(num_word<5):\n continue\n link_len['link']=links\n link_len['wordCount']=num_word\n out_arr.append(link_len)\n print(out_arr)\n return(out_arr)\n\nif __name__ == '__main__':\n app.run(debug = True,host='192.168.43.164')\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
### 15/04/2020
### Author: Omer Goder
### Looping through a list
months = ['january','fabruary','march','april','may','june','july','august','september','october','november','december']
# Using a for loop to print a list
for month in months:
print("The next month is:\t" + month)
print('\n')
print("\nEnd of program\n") # Print out once - not in the loop
#example of iterating with an index using enumerate()
#for index, month in enumerate(months):
# print(index, month.title() + " is a name of a month\n")
|
normal
|
{
"blob_id": "bc8bc5c3b6954302d005fe618827c644f93ad14e",
"index": 6030,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor month in months:\n print('The next month is:\\t' + month)\n print('\\n')\nprint(\"\"\"\nEnd of program\n\"\"\")\n",
"step-3": "months = ['january', 'fabruary', 'march', 'april', 'may', 'june', 'july',\n 'august', 'september', 'october', 'november', 'december']\nfor month in months:\n print('The next month is:\\t' + month)\n print('\\n')\nprint(\"\"\"\nEnd of program\n\"\"\")\n",
"step-4": "### 15/04/2020\r\n### Author: Omer Goder\r\n### Looping through a list\r\n\r\nmonths = ['january','fabruary','march','april','may','june','july','august','september','october','november','december']\r\n\r\n# Using a for loop to print a list\r\nfor month in months:\r\n\tprint(\"The next month is:\\t\" + month)\r\n\tprint('\\n')\r\nprint(\"\\nEnd of program\\n\") # Print out once - not in the loop\r\n\r\n\r\n#example for indexing using enumeration (considers non-pythonic)\r\n#for index, month in enumerate(months):\r\n\t# print(index, month.title() + \" is a name of a month\\n\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Rectangle():
def __init__(self,length,breadth):
self.length=length
self.breadth=breadth
def area(self):
return(self.length*self.breadth)
def perimeter(self):
return(2*(self.length+self.breadth))
r1=Rectangle(4,5)
r2=Rectangle(5,7)
a1=r1.area()
a2=r2.area()
p1=r1.perimeter()
p2=r2.perimeter()
print("the area of rectangle 1 :",a1)
print("the perimeter of rectangle 1:",p1)
print("the area of rectangle 2:",a2)
print("the perimeter of rectangle 2:",p2)
if(a1>a2):
print("rectangle 1 is bigger")
else:
print("rectangle 2 is bigger")
|
normal
|
{
"blob_id": "d5691403812cd3742f8e8b74d4ca613eca784ffd",
"index": 9677,
"step-1": "class Rectangle:\n <mask token>\n <mask token>\n\n def perimeter(self):\n return 2 * (self.length + self.breadth)\n\n\n<mask token>\n",
"step-2": "class Rectangle:\n\n def __init__(self, length, breadth):\n self.length = length\n self.breadth = breadth\n <mask token>\n\n def perimeter(self):\n return 2 * (self.length + self.breadth)\n\n\n<mask token>\n",
"step-3": "class Rectangle:\n\n def __init__(self, length, breadth):\n self.length = length\n self.breadth = breadth\n\n def area(self):\n return self.length * self.breadth\n\n def perimeter(self):\n return 2 * (self.length + self.breadth)\n\n\n<mask token>\n",
"step-4": "class Rectangle:\n\n def __init__(self, length, breadth):\n self.length = length\n self.breadth = breadth\n\n def area(self):\n return self.length * self.breadth\n\n def perimeter(self):\n return 2 * (self.length + self.breadth)\n\n\nr1 = Rectangle(4, 5)\nr2 = Rectangle(5, 7)\na1 = r1.area()\na2 = r2.area()\np1 = r1.perimeter()\np2 = r2.perimeter()\nprint('the area of rectangle 1 :', a1)\nprint('the perimeter of rectangle 1:', p1)\nprint('the area of rectangle 2:', a2)\nprint('the perimeter of rectangle 2:', p2)\nif a1 > a2:\n print('rectangle 1 is bigger')\nelse:\n print('rectangle 2 is bigger')\n",
"step-5": "class Rectangle():\n def __init__(self,length,breadth):\n self.length=length\n self.breadth=breadth\n def area(self):\n return(self.length*self.breadth)\n def perimeter(self):\n return(2*(self.length+self.breadth))\nr1=Rectangle(4,5)\nr2=Rectangle(5,7)\na1=r1.area()\na2=r2.area()\np1=r1.perimeter()\np2=r2.perimeter()\nprint(\"the area of rectangle 1 :\",a1)\nprint(\"the perimeter of rectangle 1:\",p1)\nprint(\"the area of rectangle 2:\",a2)\nprint(\"the perimeter of rectangle 2:\",p2)\nif(a1>a2):\n print(\"rectangle 1 is bigger\")\nelse:\n print(\"rectangle 2 is bigger\")",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
import turtle
hexagon = turtle.Turtle()
for i in range(6):
hexagon.forward(100)
hexagon.left(60)
|
normal
|
{
"blob_id": "f6401eca2dc0ea86a934e859c35fa2d6c85a61b3",
"index": 8695,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(6):\n hexagon.forward(100)\n hexagon.left(60)\n",
"step-3": "<mask token>\nhexagon = turtle.Turtle()\nfor i in range(6):\n hexagon.forward(100)\n hexagon.left(60)\n",
"step-4": "import turtle\nhexagon = turtle.Turtle()\nfor i in range(6):\n hexagon.forward(100)\n hexagon.left(60)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Python Challenge - Level 1 - What about making trans?
"""
import string
#import requests
#res = requests.get('http://www.pythonchallenge.com/pc/def/map.html')
#res.raise_for_status()
#print(res.text)
INPUT_TEXT = string.ascii_lowercase # abcdefghijklmnopqrstuvwxyz
OUTPUT_TEXT = INPUT_TEXT[2:]+INPUT_TEXT[:2] # cdefghijklmnopqrstuvwxyzab
TRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)
CYPHER_TEXT = """g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr \
amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw \
rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu \
ynnjw ml rfc spj."""
#print(CYPHER_TEXT.translate(TRANSLATION_TABLE))
# The encrypted text told us to apply the same translation to the url
#print('map'.translate(TRANSLATION_TABLE)) # solution here
# Success, let's print out the next level url
print('http://www.pythonchallenge.com/pc/def/ocr.html')
|
normal
|
{
"blob_id": "3c03f71ef9de8825ecd7c89208c79f43c9fb7a56",
"index": 9594,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-3": "<mask token>\nINPUT_TEXT = string.ascii_lowercase\nOUTPUT_TEXT = INPUT_TEXT[2:] + INPUT_TEXT[:2]\nTRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)\nCYPHER_TEXT = (\n \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\"\n )\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-4": "<mask token>\nimport string\nINPUT_TEXT = string.ascii_lowercase\nOUTPUT_TEXT = INPUT_TEXT[2:] + INPUT_TEXT[:2]\nTRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)\nCYPHER_TEXT = (\n \"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\"\n )\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-5": "\"\"\"\nPython Challenge - Level 1 - What about making trans?\n\"\"\"\nimport string\n#import requests\n#res = requests.get('http://www.pythonchallenge.com/pc/def/map.html')\n#res.raise_for_status()\n#print(res.text)\n\nINPUT_TEXT = string.ascii_lowercase # abcdefghijklmnopqrstuvwxyz\nOUTPUT_TEXT = INPUT_TEXT[2:]+INPUT_TEXT[:2] # cdefghijklmnopqrstuvwxyzab\nTRANSLATION_TABLE = str.maketrans(INPUT_TEXT, OUTPUT_TEXT)\nCYPHER_TEXT = \"\"\"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr \\\namknsrcpq ypc dmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw \\\nrfgq rcvr gq qm jmle. sqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu \\\nynnjw ml rfc spj.\"\"\"\n\n#print(CYPHER_TEXT.translate(TRANSLATION_TABLE))\n\n# The encrypted text told us to apply the same translation to the url\n#print('map'.translate(TRANSLATION_TABLE)) # solution here\n\n# Success, let's print out the next level url\nprint('http://www.pythonchallenge.com/pc/def/ocr.html')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from scipy.stats import itemfreq
from sklearn.model_selection import StratifiedKFold
from keras_utils.keras_utils import *
from keras.utils.np_utils import to_categorical
from keras.layers import Input, Embedding, Dense, GlobalAveragePooling1D, Flatten
from keras.layers import add, multiply, LSTM, Bidirectional, BatchNormalization, LeakyReLU, concatenate, Lambda
from keras.models import Model
from keras import backend as K
def f1(y_true, y_pred):
def recall(y_true, y_pred):
"""Recall metric.
Only computes a batch-wise average of recall.
Computes the recall, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
recall = recall(y_true, y_pred)
return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
def precision(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
class MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):
def __init__(self, **kwargs):
super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)
self.supports_masking = True
class MaskableFlatten(Flatten):
def __init__(self, **kwargs):
super(MaskableFlatten, self).__init__(**kwargs)
self.supports_masking = True
# train data path
DATA1_TRAIN_PATH = '../data/data_1_train.csv'
DATA2_TRAIN_PATH = '../data/data_2_train.csv'
# GLoVe pre-trained word vectors path
EMBEDDING_DIR = '../embeddings/'
EMBEDDING_TYPE = 'glove.6B.300d.txt' # glove.6B.300d.txt
EMBEDDING_PICKLE_DIR = 'embeddings_index.p'
EMBEDDING_ERROR_DIR = 'embeddings_error.p'
ASPECT_EMBEDDING_DIR = 'aspect_embeddings.p'
# tokenizer path
TOKENIZER_DIR = 'embeddings/tokenizer.p'
MAX_SEQ_LENGTH = 60
MAX_NB_WORDS = 95000
EMBEDDING_DIM = 300
# aspect dictionary
aspect_dict = {}
"""
What this model does:
2 ip - 1 op model : 2 ip = sentence and aspect sentence
Shared embedding layer = reduce # of params and chance to overfit.
sentence embedding = sentence passed through embedding layer (keep for later)
aspect embedding = aspect sentence passed through embedding layer
On this aspect embedding, use an attention mechanism to jointly learn the "best" augmentation to the sentence embedding
- Dense layer that maps 1 : 1 between the aspect embedding and the aspect attention
- Softmax forces it to choose the "parts" of the sentence that help the most in training
- No bias needed for attention
- Next is to actually augment the aspect embeddings with this learned attention
- The element-wise multiplication forces many embeddings to become close to zero
- Only a few will remain "strong" after this multiplication. These are the "important" words in the aspect sentence
Finally, augment the original sentence embeddings with the attended aspect embeddings
- This will "add" some strength to the embeddings of the "important" words
- Remaining words will not be impacted at all (since they are added with near zero values)
Benefits of this model
- Choose if you want to send a unique aspect sentence for the corresponding sentence
- By this I mean, you have a choice
- 1) Use the original sentence as aspect input.
In doing so, it is basically like saying learn on your own what the aspect word is
It may not give much benefit, as the attended vector has the chance of being all equal (no attention)
- 2) Use a true aspect encoding as the aspect input.
Since you are sharing the embedding now, you cannot use random / own assigned aspects anymore.
The aspect ids that you pass will now be from the original embedding matrix using the word_index
dict that Keras gives you.
In this case, an aspect sentence would be of the form :
[0 0 ... 32506 66049 5968 0 0 ...]
Here 32506 = "Apple", 66049 = "Macbook", 5968 = "Pro" (say)
"""
NUM_CLASSES = 3 # 0 = neg, 1 = neutral, 2 = pos
MAX_SENTENCE_LENGTH = 60
MAX_NUM_WORDS = 20000 # this will be number of unique "words" (n-grams etc) there are
MAX_NUM_ASPECT_WORDS = 300 # this will be the number of unique aspect "words" (uni-grams only)
EMBEDDING_DIM = 300
EMBEDDING_WEIGHTS = None
MASK_ZEROS = True # this can be true ONLY for RNN models. If even 1 CNN is there, it will crash
#
# embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
# weights=EMBEDDING_WEIGHTS, trainable=False)
#
# sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
# aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
#
# sentence_embedding = embedding(sentence_ip) # Note: these are same embedding layer
# aspect_embedding = embedding(aspect_ip) # Note: these are same embedding layer
#
# # Create the attention vector for the aspect embeddings
# aspect_attention = Dense(EMBEDDING_DIM, activation='softmax', use_bias=False,
# name='aspect_attention')(aspect_embedding)
#
# # dampen the aspect embeddings according to the attention with an element-wise multiplication
# aspect_embedding = multiply([aspect_embedding, aspect_attention])
#
# # augment the sample embedding with information from the attended aspect embedding
# sentence_embedding = add([sentence_embedding, aspect_embedding])
#
# # now you can continue with whatever layer other than CNNs
#
# x = LSTM(100)(sentence_embedding)
# x = Dense(NUM_CLASSES, activation='softmax')(x)
#
# model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)
#
# model.summary()
#
#
# from keras.utils.vis_utils import plot_model
# plot_model(model, to_file='shared_embedding.png', show_shapes=False, show_layer_names=True)
#
"""
What this model does:
2 ip - 1 op model : 2 ip = sentence and aspect sentence
Disjoint embedding layers = more # of params and chance to overfit.
sentence embedding = sentence passed through embedding layer (keep for later ; not learned)
aspect embedding = aspect sentence passed through embedding layer (learned)
Benefits of this model
- Use a true aspect encoding as the aspect input.
Since you are learning the embedding now, you can use own assigned aspects.
In this case, an aspect sentence would be of the form :
[0 0 ... 2 2 2 0 0 ...]
Here 2 = "Apple", 2 = "Macbook", 2 = "Pro" (say)
Therefore, the id is given by you, and is shared over all of the aspect words for a given aspect term.
"""
def output_shape(input_shape):
shape = list(input_shape)
shape[-1] /= 2
print(shape)
return tuple(shape)
def model_2():
K.clear_session()
tech_reviews, food_reviews = load_and_clean()
embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)
# labels = [x+1 for x in labels]
print(itemfreq(labels))
indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)
np.random.shuffle(indices)
padded_sequences = padded_sequences[indices]
labels = to_categorical(labels, num_classes=NUM_CLASSES)
labels = labels[indices]
aspect_sequences = aspect_sequences[indices]
sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
weights=EMBEDDING_WEIGHTS, trainable=False)
# aspect_embedding = Embedding(MAX_NUM_ASPECT_WORDS, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)
# this needs to be True
aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)
sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
sentence_embedding = sentence_embedding(sentence_ip) # Note: these are two different embeddings
aspect_embedding = aspect_embedding(aspect_ip) # Note: these are two different embeddings
# Create the attention vector for the aspect embeddings
aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,
name='aspect_attention')(aspect_embedding)
# dampen the aspect embeddings according to the attention with an element-wise multiplication
aspect_embedding = multiply([aspect_embedding, aspect_attention])
# augment the sample embedding with information from the attended aspect embedding
sentence_embedding = concatenate([sentence_embedding, aspect_embedding])
# now you can continue with whatever layer other than CNNs
# x = MaskedGlobalAveragePooling1D()(sentence_embedding)
# x = MaskableFlatten()(sentence_embedding)
x = LSTM(256)(sentence_embedding)
# y = Lambda(lambda z: z[:, :, :NUM_CELLS//2], output_shape=output_shape)(x)
# x = Dense(NUM_CELLS//2, activation='softmax', use_bias=False)(x)
# x = multiply([x, y])
# x = MaskedGlobalAveragePooling1D()(x)
# x = Dense(256, activation='linear', kernel_initializer='he_normal')(x)
# x = BatchNormalization()(x)
# x = LeakyReLU()(x)
x = Dense(3, activation='softmax')(x)
model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
print(model.summary())
model.fit([padded_sequences, aspect_sequences], labels, epochs=10, verbose=1, validation_split=0.2)
# from keras.utils.vis_utils import plot_model
# plot_model(model, to_file='learned_embedding.png', show_shapes=False, show_layer_names=True)
def model_2_CV():
K.clear_session()
tech_reviews, food_reviews = load_and_clean()
embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(tech_reviews)
labels = np.array([x + 1 for x in labels])
print(itemfreq(labels))
# Random shuffling of padded, aspect sequences and labels
# indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)
# np.random.shuffle(indices)
# padded_sequences = padded_sequences[indices]
# labels = to_categorical(labels, num_classes=NUM_CLASSES)
# labels = labels[indices]
# aspect_sequences = aspect_sequences[indices]
print(labels.shape)
N_FOLDS = 3
fbeta_scores = []
skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)
for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):
print('Fold %d' % (j + 1))
sentence_train, aspect_train, y_train = padded_sequences[train_idx], aspect_sequences[train_idx], \
labels[train_idx]
sentence_test, aspect_test, y_test = padded_sequences[test_idx], aspect_sequences[test_idx], labels[test_idx]
y_train = to_categorical(y_train, 3)
y_test = to_categorical(y_test, 3)
sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
weights=EMBEDDING_WEIGHTS, trainable=False)
aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)
sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
sentence_embedding = sentence_embedding(sentence_ip) # Note: these are two different embeddings
aspect_embedding = aspect_embedding(aspect_ip) # Note: these are two different embeddings
# Create the attention vector for the aspect embeddings
aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,
name='aspect_attention')(aspect_embedding)
# dampen the aspect embeddings according to the attention with an element-wise multiplication
aspect_embedding = multiply([aspect_embedding, aspect_attention])
# augment the sample embedding with information from the attended aspect embedding
sentence_embedding = concatenate([sentence_embedding, aspect_embedding])
x = LSTM(256)(sentence_embedding)
x = Dense(3, activation='softmax')(x)
model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', fbeta_score])
print(model.summary())
model.fit([sentence_train, aspect_train], y_train, epochs=5, verbose=1,
validation_data=([sentence_test, aspect_test], y_test))
scores = model.evaluate([sentence_test, aspect_test], y_test)
fbeta_scores.append(scores[-1])
print("Average fbeta score : ", sum(fbeta_scores) / len(fbeta_scores))
def model_3():
K.clear_session()
tech_reviews, food_reviews = load_and_clean()
embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)
labels = np.array([x + 1 for x in labels])
print(itemfreq(labels))
N_FOLDS = 10
skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)
f = open('history.txt', 'w+')
for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):
print('Fold %d' % (j + 1))
sentence_train, y_train = padded_sequences[train_idx], labels[train_idx]
sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]
y_train = to_categorical(y_train, 3)
y_test = to_categorical(y_test, 3)
sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,
weights=EMBEDDING_WEIGHTS, trainable=False)
# labels = to_categorical(labels, 3)
sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')
sentence_embedding = sentence_embedding(sentence_ip) # Note: these are two different embeddings
x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)
x = Dense(3, activation='softmax')(x)
model = Model(inputs=sentence_ip, outputs=x)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', f1, precision, recall])
print(model.summary())
history = model.fit(sentence_train, y_train, epochs=10, verbose=1, validation_data=(sentence_test, y_test))
f.write('\nFold %d\n' % (j + 1))
f.write(str(history.history['acc']))
f.write(str(history.history['val_acc']))
f.write(str(history.history['f1']))
f.write(str(history.history['precision']))
f.write(str(history.history['recall']))
if __name__ == '__main__':
model_3()
|
normal
|
{
"blob_id": "0b125e7e9e763d4fd71e381ca823f9e9aa8ea606",
"index": 8198,
"step-1": "<mask token>\n\n\nclass MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\n\n def __init__(self, **kwargs):\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nclass MaskableFlatten(Flatten):\n\n def __init__(self, **kwargs):\n super(MaskableFlatten, self).__init__(**kwargs)\n self.supports_masking = True\n\n\n<mask token>\n\n\ndef model_2():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n print(itemfreq(labels))\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\n np.random.shuffle(indices)\n padded_sequences = padded_sequences[indices]\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\n labels = labels[indices]\n aspect_sequences = aspect_sequences[indices]\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS, trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=\n False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc'])\n print(model.summary())\n model.fit([padded_sequences, aspect_sequences], labels, epochs=10,\n verbose=1, validation_split=0.2)\n\n\ndef model_2_CV():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(tech_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n print(labels.shape)\n N_FOLDS = 3\n fbeta_scores = []\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, aspect_train, y_train = padded_sequences[train_idx\n ], aspect_sequences[train_idx], labels[train_idx]\n sentence_test, aspect_test, y_test = padded_sequences[test_idx\n ], aspect_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid',\n use_bias=False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, 
aspect_embedding]\n )\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', fbeta_score])\n print(model.summary())\n model.fit([sentence_train, aspect_train], y_train, epochs=5,\n verbose=1, validation_data=([sentence_test, aspect_test], y_test))\n scores = model.evaluate([sentence_test, aspect_test], y_test)\n fbeta_scores.append(scores[-1])\n print('Average fbeta score : ', sum(fbeta_scores) / len(fbeta_scores))\n\n\ndef model_3():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n N_FOLDS = 10\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n f = open('history.txt', 'w+')\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx\n ]\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=sentence_ip, outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', f1, precision, recall])\n print(model.summary())\n history = model.fit(sentence_train, y_train, epochs=10, verbose=1,\n validation_data=(sentence_test, y_test))\n f.write('\\nFold %d\\n' % (j + 1))\n f.write(str(history.history['acc']))\n f.write(str(history.history['val_acc']))\n f.write(str(history.history['f1']))\n f.write(str(history.history['precision']))\n f.write(str(history.history['recall']))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f1(y_true, y_pred):\n\n def recall(y_true, y_pred):\n \"\"\"Recall metric.\n\n Only computes a batch-wise average of recall.\n\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n def precision(y_true, y_pred):\n \"\"\"Precision metric.\n\n Only computes a batch-wise average of precision.\n\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n precision = precision(y_true, y_pred)\n recall = recall(y_true, y_pred)\n return 2 * (precision * recall / (precision + recall + K.epsilon()))\n\n\ndef precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\nclass MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\n\n def __init__(self, **kwargs):\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nclass MaskableFlatten(Flatten):\n\n def __init__(self, **kwargs):\n super(MaskableFlatten, self).__init__(**kwargs)\n self.supports_masking = True\n\n\n<mask token>\n\n\ndef output_shape(input_shape):\n shape = list(input_shape)\n shape[-1] /= 2\n print(shape)\n return tuple(shape)\n\n\ndef model_2():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n print(itemfreq(labels))\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\n np.random.shuffle(indices)\n padded_sequences = padded_sequences[indices]\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\n labels = labels[indices]\n aspect_sequences = aspect_sequences[indices]\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS, trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=\n False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc'])\n print(model.summary())\n 
model.fit([padded_sequences, aspect_sequences], labels, epochs=10,\n verbose=1, validation_split=0.2)\n\n\ndef model_2_CV():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(tech_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n print(labels.shape)\n N_FOLDS = 3\n fbeta_scores = []\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, aspect_train, y_train = padded_sequences[train_idx\n ], aspect_sequences[train_idx], labels[train_idx]\n sentence_test, aspect_test, y_test = padded_sequences[test_idx\n ], aspect_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid',\n use_bias=False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding]\n )\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', fbeta_score])\n print(model.summary())\n model.fit([sentence_train, aspect_train], y_train, epochs=5,\n verbose=1, validation_data=([sentence_test, aspect_test], y_test))\n scores = model.evaluate([sentence_test, aspect_test], y_test)\n fbeta_scores.append(scores[-1])\n print('Average fbeta score : ', sum(fbeta_scores) / len(fbeta_scores))\n\n\ndef model_3():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n N_FOLDS = 10\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n f = open('history.txt', 'w+')\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx\n ]\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=sentence_ip, outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', f1, precision, recall])\n print(model.summary())\n history = 
model.fit(sentence_train, y_train, epochs=10, verbose=1,\n validation_data=(sentence_test, y_test))\n f.write('\\nFold %d\\n' % (j + 1))\n f.write(str(history.history['acc']))\n f.write(str(history.history['val_acc']))\n f.write(str(history.history['f1']))\n f.write(str(history.history['precision']))\n f.write(str(history.history['recall']))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef f1(y_true, y_pred):\n\n def recall(y_true, y_pred):\n \"\"\"Recall metric.\n\n Only computes a batch-wise average of recall.\n\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n def precision(y_true, y_pred):\n \"\"\"Precision metric.\n\n Only computes a batch-wise average of precision.\n\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n precision = precision(y_true, y_pred)\n recall = recall(y_true, y_pred)\n return 2 * (precision * recall / (precision + recall + K.epsilon()))\n\n\ndef precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\nclass MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\n\n def __init__(self, **kwargs):\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nclass MaskableFlatten(Flatten):\n\n def __init__(self, **kwargs):\n super(MaskableFlatten, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nDATA1_TRAIN_PATH = '../data/data_1_train.csv'\nDATA2_TRAIN_PATH = '../data/data_2_train.csv'\nEMBEDDING_DIR = '../embeddings/'\nEMBEDDING_TYPE = 'glove.6B.300d.txt'\nEMBEDDING_PICKLE_DIR = 'embeddings_index.p'\nEMBEDDING_ERROR_DIR = 'embeddings_error.p'\nASPECT_EMBEDDING_DIR = 'aspect_embeddings.p'\nTOKENIZER_DIR = 'embeddings/tokenizer.p'\nMAX_SEQ_LENGTH = 60\nMAX_NB_WORDS = 95000\nEMBEDDING_DIM = 300\naspect_dict = {}\n<mask token>\nNUM_CLASSES = 3\nMAX_SENTENCE_LENGTH = 60\nMAX_NUM_WORDS = 20000\nMAX_NUM_ASPECT_WORDS = 300\nEMBEDDING_DIM = 300\nEMBEDDING_WEIGHTS = None\nMASK_ZEROS = True\n<mask token>\n\n\ndef output_shape(input_shape):\n shape = list(input_shape)\n shape[-1] /= 2\n print(shape)\n return tuple(shape)\n\n\ndef model_2():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n print(itemfreq(labels))\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\n np.random.shuffle(indices)\n padded_sequences = padded_sequences[indices]\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\n labels = labels[indices]\n aspect_sequences = aspect_sequences[indices]\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS, trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = 
sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=\n False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc'])\n print(model.summary())\n model.fit([padded_sequences, aspect_sequences], labels, epochs=10,\n verbose=1, validation_split=0.2)\n\n\ndef model_2_CV():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(tech_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n print(labels.shape)\n N_FOLDS = 3\n fbeta_scores = []\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, aspect_train, y_train = padded_sequences[train_idx\n ], aspect_sequences[train_idx], labels[train_idx]\n sentence_test, aspect_test, y_test = padded_sequences[test_idx\n ], aspect_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid',\n use_bias=False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding]\n )\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', fbeta_score])\n print(model.summary())\n model.fit([sentence_train, aspect_train], y_train, epochs=5,\n verbose=1, validation_data=([sentence_test, aspect_test], y_test))\n scores = model.evaluate([sentence_test, aspect_test], y_test)\n fbeta_scores.append(scores[-1])\n print('Average fbeta score : ', sum(fbeta_scores) / len(fbeta_scores))\n\n\ndef model_3():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n N_FOLDS = 10\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n f = open('history.txt', 'w+')\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx\n ]\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = 
to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=sentence_ip, outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', f1, precision, recall])\n print(model.summary())\n history = model.fit(sentence_train, y_train, epochs=10, verbose=1,\n validation_data=(sentence_test, y_test))\n f.write('\\nFold %d\\n' % (j + 1))\n f.write(str(history.history['acc']))\n f.write(str(history.history['val_acc']))\n f.write(str(history.history['f1']))\n f.write(str(history.history['precision']))\n f.write(str(history.history['recall']))\n\n\nif __name__ == '__main__':\n model_3()\n",
"step-4": "from scipy.stats import itemfreq\nfrom sklearn.model_selection import StratifiedKFold\nfrom keras_utils.keras_utils import *\nfrom keras.utils.np_utils import to_categorical\nfrom keras.layers import Input, Embedding, Dense, GlobalAveragePooling1D, Flatten\nfrom keras.layers import add, multiply, LSTM, Bidirectional, BatchNormalization, LeakyReLU, concatenate, Lambda\nfrom keras.models import Model\nfrom keras import backend as K\n\n\ndef f1(y_true, y_pred):\n\n def recall(y_true, y_pred):\n \"\"\"Recall metric.\n\n Only computes a batch-wise average of recall.\n\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n def precision(y_true, y_pred):\n \"\"\"Precision metric.\n\n Only computes a batch-wise average of precision.\n\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n precision = precision(y_true, y_pred)\n recall = recall(y_true, y_pred)\n return 2 * (precision * recall / (precision + recall + K.epsilon()))\n\n\ndef precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n\n\ndef recall(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n\nclass MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\n\n def __init__(self, **kwargs):\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nclass MaskableFlatten(Flatten):\n\n def __init__(self, **kwargs):\n super(MaskableFlatten, self).__init__(**kwargs)\n self.supports_masking = True\n\n\nDATA1_TRAIN_PATH = '../data/data_1_train.csv'\nDATA2_TRAIN_PATH = '../data/data_2_train.csv'\nEMBEDDING_DIR = '../embeddings/'\nEMBEDDING_TYPE = 'glove.6B.300d.txt'\nEMBEDDING_PICKLE_DIR = 'embeddings_index.p'\nEMBEDDING_ERROR_DIR = 'embeddings_error.p'\nASPECT_EMBEDDING_DIR = 'aspect_embeddings.p'\nTOKENIZER_DIR = 'embeddings/tokenizer.p'\nMAX_SEQ_LENGTH = 60\nMAX_NB_WORDS = 95000\nEMBEDDING_DIM = 300\naspect_dict = {}\n<mask token>\nNUM_CLASSES = 3\nMAX_SENTENCE_LENGTH = 60\nMAX_NUM_WORDS = 20000\nMAX_NUM_ASPECT_WORDS = 300\nEMBEDDING_DIM = 300\nEMBEDDING_WEIGHTS = None\nMASK_ZEROS = True\n<mask token>\n\n\ndef output_shape(input_shape):\n shape = list(input_shape)\n shape[-1] /= 2\n print(shape)\n return tuple(shape)\n\n\ndef model_2():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n print(itemfreq(labels))\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\n np.random.shuffle(indices)\n padded_sequences = padded_sequences[indices]\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\n labels = labels[indices]\n aspect_sequences = 
aspect_sequences[indices]\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS, trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=\n False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc'])\n print(model.summary())\n model.fit([padded_sequences, aspect_sequences], labels, epochs=10,\n verbose=1, validation_split=0.2)\n\n\ndef model_2_CV():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(tech_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n print(labels.shape)\n N_FOLDS = 3\n fbeta_scores = []\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, aspect_train, y_train = padded_sequences[train_idx\n ], aspect_sequences[train_idx], labels[train_idx]\n sentence_test, aspect_test, y_test = padded_sequences[test_idx\n ], aspect_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM,\n mask_zero=MASK_ZEROS, trainable=True)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n aspect_embedding = aspect_embedding(aspect_ip)\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid',\n use_bias=False, name='aspect_attention')(aspect_embedding)\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding]\n )\n x = LSTM(256)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', fbeta_score])\n print(model.summary())\n model.fit([sentence_train, aspect_train], y_train, epochs=5,\n verbose=1, validation_data=([sentence_test, aspect_test], y_test))\n scores = model.evaluate([sentence_test, aspect_test], y_test)\n fbeta_scores.append(scores[-1])\n print('Average fbeta score : ', sum(fbeta_scores) / len(fbeta_scores))\n\n\ndef model_3():\n K.clear_session()\n tech_reviews, food_reviews = load_and_clean()\n embedding_matrix, aspect_sequences, padded_sequences, labels = (\n load_embedding_matrix(food_reviews))\n labels = np.array([(x + 1) for x in labels])\n print(itemfreq(labels))\n N_FOLDS = 
10\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\n f = open('history.txt', 'w+')\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences,\n labels)):\n print('Fold %d' % (j + 1))\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx\n ]\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\n y_train = to_categorical(y_train, 3)\n y_test = to_categorical(y_test, 3)\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=\n EMBEDDING_DIM, mask_zero=MASK_ZEROS, weights=EMBEDDING_WEIGHTS,\n trainable=False)\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\n sentence_embedding = sentence_embedding(sentence_ip)\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\n x = Dense(3, activation='softmax')(x)\n model = Model(inputs=sentence_ip, outputs=x)\n model.compile(optimizer='adam', loss='categorical_crossentropy',\n metrics=['acc', f1, precision, recall])\n print(model.summary())\n history = model.fit(sentence_train, y_train, epochs=10, verbose=1,\n validation_data=(sentence_test, y_test))\n f.write('\\nFold %d\\n' % (j + 1))\n f.write(str(history.history['acc']))\n f.write(str(history.history['val_acc']))\n f.write(str(history.history['f1']))\n f.write(str(history.history['precision']))\n f.write(str(history.history['recall']))\n\n\nif __name__ == '__main__':\n model_3()\n",
"step-5": "from scipy.stats import itemfreq\r\nfrom sklearn.model_selection import StratifiedKFold\r\n\r\nfrom keras_utils.keras_utils import *\r\n\r\nfrom keras.utils.np_utils import to_categorical\r\nfrom keras.layers import Input, Embedding, Dense, GlobalAveragePooling1D, Flatten\r\nfrom keras.layers import add, multiply, LSTM, Bidirectional, BatchNormalization, LeakyReLU, concatenate, Lambda\r\nfrom keras.models import Model\r\nfrom keras import backend as K\r\n\r\n\r\ndef f1(y_true, y_pred):\r\n def recall(y_true, y_pred):\r\n \"\"\"Recall metric.\r\n\r\n Only computes a batch-wise average of recall.\r\n\r\n Computes the recall, a metric for multi-label classification of\r\n how many relevant items are selected.\r\n \"\"\"\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall\r\n\r\n def precision(y_true, y_pred):\r\n \"\"\"Precision metric.\r\n\r\n Only computes a batch-wise average of precision.\r\n\r\n Computes the precision, a metric for multi-label classification of\r\n how many selected items are relevant.\r\n \"\"\"\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision\r\n\r\n precision = precision(y_true, y_pred)\r\n recall = recall(y_true, y_pred)\r\n return 2 * ((precision * recall) / (precision + recall + K.epsilon()))\r\n\r\n\r\ndef precision(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\r\n precision = true_positives / (predicted_positives + K.epsilon())\r\n return precision\r\n\r\n\r\ndef recall(y_true, y_pred):\r\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\r\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\r\n recall = true_positives / (possible_positives + K.epsilon())\r\n return recall\r\n\r\n\r\nclass MaskedGlobalAveragePooling1D(GlobalAveragePooling1D):\r\n\r\n def __init__(self, **kwargs):\r\n super(MaskedGlobalAveragePooling1D, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n\r\n\r\nclass MaskableFlatten(Flatten):\r\n\r\n def __init__(self, **kwargs):\r\n super(MaskableFlatten, self).__init__(**kwargs)\r\n self.supports_masking = True\r\n\r\n\r\n# train data path\r\nDATA1_TRAIN_PATH = '../data/data_1_train.csv'\r\nDATA2_TRAIN_PATH = '../data/data_2_train.csv'\r\n\r\n# GLoVe pre-trained word vectors path\r\nEMBEDDING_DIR = '../embeddings/'\r\nEMBEDDING_TYPE = 'glove.6B.300d.txt' # glove.6B.300d.txt\r\nEMBEDDING_PICKLE_DIR = 'embeddings_index.p'\r\nEMBEDDING_ERROR_DIR = 'embeddings_error.p'\r\nASPECT_EMBEDDING_DIR = 'aspect_embeddings.p'\r\n\r\n# tokenizer path\r\nTOKENIZER_DIR = 'embeddings/tokenizer.p'\r\n\r\nMAX_SEQ_LENGTH = 60\r\nMAX_NB_WORDS = 95000\r\nEMBEDDING_DIM = 300\r\n\r\n# aspect dictionary\r\naspect_dict = {}\r\n\r\n\"\"\"\r\nWhat this model does:\r\n\r\n2 ip - 1 op model : 2 ip = sentence and aspect sentence\r\n\r\nShared embedding layer = reduce # of params and chance to overfit.\r\nsentence embedding = sentence passed through embedding layer (keep for later)\r\naspect embedding = aspect sentence passed through embedding layer \r\n\r\nOn this aspect embedding, use attention mechanism to jointly learn what is the \"best\" augmentation to the sentence embedding\r\n- Dense layer that maps 1 : 
1 between the aspect embedding and the aspect attention\r\n - Softmax forces it to choose the \"parts\" of the sentence that help the most in training\r\n - No bias needed for attention\r\n\r\n- Next is to actually augment the aspect embeddings with this learned attention\r\n - The element-wise multiplication forces many embeddings to become close to zero\r\n - Only a few will remain \"strong\" after this multiplication. These are the \"important\" words in the aspect sentence\r\n\r\nFinally, augment the original sentence embeddings with the attended aspect embeddings\r\n- This will \"add\" some strength to the embeddings of the \"important\" words\r\n- Remaining words will not be impacted at all (since they are added with near zero values)\r\n\r\nBenefits of this model\r\n- Choose if you want to send a unique aspect sentence for the corresponding sentence\r\n - By this I mean, you have a choice\r\n - 1) Use the original sentence as aspect input.\r\n In doing so, it is basically like saying learn on your own what the aspect word is\r\n It may not give much benefit, as the attended vector has the chance of being all equal (no attention)\r\n - 2) Use a true aspect encoding as the aspect input.\r\n Since you are sharing the embedding now, you cannot use random / own assigned aspects anymore.\r\n The aspect ids that you pass will now be from the original embedding matrix using the word_index\r\n dict that Keras gives you.\r\n\r\n In this case, an aspect sentence would be of the form : \r\n [0 0 ... 32506 66049 5968 0 0 ...] \r\n Here 32506 = \"Apple\", 66049 = \"Macbook\" 5968 = \"Pro\" (say)\r\n\r\n\"\"\"\r\n\r\nNUM_CLASSES = 3 # 0 = neg, 1 = neutral, 2 = pos\r\n\r\nMAX_SENTENCE_LENGTH = 60\r\nMAX_NUM_WORDS = 20000 # this will be number of unique \"words\" (n-grams etc) there are\r\nMAX_NUM_ASPECT_WORDS = 300 # this will be the number of unique aspect \"words\" (uni-grams only)\r\n\r\nEMBEDDING_DIM = 300\r\nEMBEDDING_WEIGHTS = None\r\n\r\nMASK_ZEROS = True # this can be true ONLY for RNN models. 
If even 1 CNN is there, it will crash\r\n\r\n#\r\n# embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,\r\n# weights=EMBEDDING_WEIGHTS, trainable=False)\r\n#\r\n# sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n# aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n#\r\n# sentence_embedding = embedding(sentence_ip) # Note: these are same embedding layer\r\n# aspect_embedding = embedding(aspect_ip) # Note: these are same embedding layer\r\n#\r\n# # Create the attention vector for the aspect embeddings\r\n# aspect_attention = Dense(EMBEDDING_DIM, activation='softmax', use_bias=False,\r\n# name='aspect_attention')(aspect_embedding)\r\n#\r\n# # dampen the aspect embeddings according to the attention with an element-wise multiplication\r\n# aspect_embedding = multiply([aspect_embedding, aspect_attention])\r\n#\r\n# # augment the sample embedding with information from the attended aspect embedding\r\n# sentence_embedding = add([sentence_embedding, aspect_embedding])\r\n#\r\n# # now you can continue with whatever layer other than CNNs\r\n#\r\n# x = LSTM(100)(sentence_embedding)\r\n# x = Dense(NUM_CLASSES, activation='softmax')(x)\r\n#\r\n# model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\r\n#\r\n# model.summary()\r\n#\r\n#\r\n# from keras.utils.vis_utils import plot_model\r\n# plot_model(model, to_file='shared_embedding.png', show_shapes=False, show_layer_names=True)\r\n#\r\n\r\n\"\"\"\r\nWhat this model does:\r\n\r\n2 ip - 1 op model : 2 ip = sentence and aspect sentence\r\n\r\nDisjoing embedding layer = more # of params and chance to overfit.\r\nsentence embedding = sentence passed through embedding layer (keep for later ; not learned)\r\naspect embedding = aspect sentence passed through embedding layer (learned)\r\n\r\nBenefits of this model\r\n- Use a true aspect encoding as the aspect input.\r\n Since you are learning the embedding now, you can use own assigned aspects.\r\n \r\n In this case, an aspect sentence would be of the form : \r\n [0 0 ... 2 2 2 0 0 ...] 
\r\n Here 2 = \"Apple\", 2 = \"Macbook\" 2 = \"Pro\" (say)\r\n Therefore, the id is given by you, and is shared over all of the aspect words for a given aspect term.\r\n\r\n\"\"\"\r\n\r\n\r\ndef output_shape(input_shape):\r\n shape = list(input_shape)\r\n shape[-1] /= 2\r\n print(shape)\r\n return tuple(shape)\r\n\r\n\r\ndef model_2():\r\n K.clear_session()\r\n tech_reviews, food_reviews = load_and_clean()\r\n embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)\r\n # labels = [x+1 for x in labels]\r\n print(itemfreq(labels))\r\n\r\n indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\r\n np.random.shuffle(indices)\r\n padded_sequences = padded_sequences[indices]\r\n labels = to_categorical(labels, num_classes=NUM_CLASSES)\r\n labels = labels[indices]\r\n aspect_sequences = aspect_sequences[indices]\r\n\r\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,\r\n weights=EMBEDDING_WEIGHTS, trainable=False)\r\n\r\n # aspect_embedding = Embedding(MAX_NUM_ASPECT_WORDS, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)\r\n # this needs to be True\r\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)\r\n\r\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n\r\n sentence_embedding = sentence_embedding(sentence_ip) # Note: these are two different embeddings\r\n aspect_embedding = aspect_embedding(aspect_ip) # Note: these are two different embeddings\r\n\r\n # Create the attention vector for the aspect embeddings\r\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,\r\n name='aspect_attention')(aspect_embedding)\r\n\r\n # dampen the aspect embeddings according to the attention with an element-wise multiplication\r\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\r\n # augment the sample embedding with information from the attended aspect embedding\r\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\r\n\r\n # now you can continue with whatever layer other than CNNs\r\n\r\n # x = MaskedGlobalAveragePooling1D()(sentence_embedding)\r\n # x = MaskableFlatten()(sentence_embedding)\r\n x = LSTM(256)(sentence_embedding)\r\n # y = Lambda(lambda z: z[:, :, :NUM_CELLS//2], output_shape=output_shape)(x)\r\n # x = Dense(NUM_CELLS//2, activation='softmax', use_bias=False)(x)\r\n\r\n # x = multiply([x, y])\r\n # x = MaskedGlobalAveragePooling1D()(x)\r\n # x = Dense(256, activation='linear', kernel_initializer='he_normal')(x)\r\n # x = BatchNormalization()(x)\r\n # x = LeakyReLU()(x)\r\n x = Dense(3, activation='softmax')(x)\r\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\r\n\r\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\r\n\r\n print(model.summary())\r\n\r\n model.fit([padded_sequences, aspect_sequences], labels, epochs=10, verbose=1, validation_split=0.2)\r\n\r\n # from keras.utils.vis_utils import plot_model\r\n # plot_model(model, to_file='learned_embedding.png', show_shapes=False, show_layer_names=True)\r\n\r\n\r\ndef model_2_CV():\r\n K.clear_session()\r\n tech_reviews, food_reviews = load_and_clean()\r\n embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(tech_reviews)\r\n labels = np.array([x + 1 for x in labels])\r\n print(itemfreq(labels))\r\n\r\n # Random shuffling of padded, aspect sequences and 
labels\r\n # indices = np.arange(0, padded_sequences.shape[0], step=1, dtype=int)\r\n # np.random.shuffle(indices)\r\n # padded_sequences = padded_sequences[indices]\r\n # labels = to_categorical(labels, num_classes=NUM_CLASSES)\r\n # labels = labels[indices]\r\n # aspect_sequences = aspect_sequences[indices]\r\n print(labels.shape)\r\n\r\n N_FOLDS = 3\r\n fbeta_scores = []\r\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\r\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):\r\n print('Fold %d' % (j + 1))\r\n sentence_train, aspect_train, y_train = padded_sequences[train_idx], aspect_sequences[train_idx], \\\r\n labels[train_idx]\r\n sentence_test, aspect_test, y_test = padded_sequences[test_idx], aspect_sequences[test_idx], labels[test_idx]\r\n\r\n y_train = to_categorical(y_train, 3)\r\n y_test = to_categorical(y_test, 3)\r\n\r\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,\r\n weights=EMBEDDING_WEIGHTS, trainable=False)\r\n aspect_embedding = Embedding(len(aspect_dict) + 1, EMBEDDING_DIM, mask_zero=MASK_ZEROS, trainable=True)\r\n\r\n sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n aspect_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n\r\n sentence_embedding = sentence_embedding(sentence_ip) # Note: these are two different embeddings\r\n aspect_embedding = aspect_embedding(aspect_ip) # Note: these are two different embeddings\r\n\r\n # Create the attention vector for the aspect embeddings\r\n aspect_attention = Dense(EMBEDDING_DIM, activation='sigmoid', use_bias=False,\r\n name='aspect_attention')(aspect_embedding)\r\n # dampen the aspect embeddings according to the attention with an element-wise multiplication\r\n aspect_embedding = multiply([aspect_embedding, aspect_attention])\r\n # augment the sample embedding with information from the attended aspect embedding\r\n sentence_embedding = concatenate([sentence_embedding, aspect_embedding])\r\n x = LSTM(256)(sentence_embedding)\r\n x = Dense(3, activation='softmax')(x)\r\n model = Model(inputs=[sentence_ip, aspect_ip], outputs=x)\r\n\r\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', fbeta_score])\r\n\r\n print(model.summary())\r\n\r\n model.fit([sentence_train, aspect_train], y_train, epochs=5, verbose=1,\r\n validation_data=([sentence_test, aspect_test], y_test))\r\n\r\n scores = model.evaluate([sentence_test, aspect_test], y_test)\r\n fbeta_scores.append(scores[-1])\r\n\r\n print(\"Average fbeta score : \", sum(fbeta_scores) / len(fbeta_scores))\r\n\r\n\r\ndef model_3():\r\n K.clear_session()\r\n tech_reviews, food_reviews = load_and_clean()\r\n embedding_matrix, aspect_sequences, padded_sequences, labels = load_embedding_matrix(food_reviews)\r\n labels = np.array([x + 1 for x in labels])\r\n print(itemfreq(labels))\r\n\r\n N_FOLDS = 10\r\n skf = StratifiedKFold(N_FOLDS, shuffle=True, random_state=1000)\r\n f = open('history.txt', 'w+')\r\n for j, (train_idx, test_idx) in enumerate(skf.split(padded_sequences, labels)):\r\n print('Fold %d' % (j + 1))\r\n sentence_train, y_train = padded_sequences[train_idx], labels[train_idx]\r\n sentence_test, y_test = padded_sequences[test_idx], labels[test_idx]\r\n\r\n y_train = to_categorical(y_train, 3)\r\n y_test = to_categorical(y_test, 3)\r\n\r\n sentence_embedding = Embedding(MAX_NUM_WORDS, output_dim=EMBEDDING_DIM, mask_zero=MASK_ZEROS,\r\n weights=EMBEDDING_WEIGHTS, trainable=False)\r\n # labels = to_categorical(labels, 3)\r\n 
sentence_ip = Input(shape=(MAX_SENTENCE_LENGTH,), dtype='int32')\r\n sentence_embedding = sentence_embedding(sentence_ip) # Note: these are two different embeddings\r\n x = LSTM(256, dropout=0.2, recurrent_dropout=0.2)(sentence_embedding)\r\n x = Dense(3, activation='softmax')(x)\r\n model = Model(inputs=sentence_ip, outputs=x)\r\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc', f1, precision, recall])\r\n print(model.summary())\r\n history = model.fit(sentence_train, y_train, epochs=10, verbose=1, validation_data=(sentence_test, y_test))\r\n f.write('\\nFold %d\\n' % (j + 1))\r\n f.write(str(history.history['acc']))\r\n f.write(str(history.history['val_acc']))\r\n f.write(str(history.history['f1']))\r\n f.write(str(history.history['precision']))\r\n f.write(str(history.history['recall']))\r\n\r\n\r\nif __name__ == '__main__':\r\n model_3()\r\n",
"step-ids": [
7,
11,
13,
14,
15
]
}
|
[
7,
11,
13,
14,
15
] |
# -*- coding: utf-8 -*-
import random
from cocos.actions import MoveTo, CallFuncS
from cocos.sprite import Sprite
import define
def kill(spr):
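    # Stop updating this dot, credit the snake that ate it (big dots are worth more),
    # remove it from the arena batch, and spawn a replacement if it was a small dot.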
spr.unschedule(spr.update)
arena = spr.parent.parent
if not spr.is_big:
arena.batch.add(Dot())
spr.killer.add_score()
else:
spr.killer.add_score(2)
arena.batch.remove(spr)
if not spr.killer.is_enemy:
arena.parent.update_score()
del spr
class Dot(Sprite):
def __init__(self, pos=None, color=None):
if color is None:
color = random.choice(define.ALL_COLOR)
super(Dot, self).__init__('circle.png', color=color)
self.killed = False
if pos is None:
self.position = (random.randint(40, define.WIDTH - 40),
random.randint(40, define.HEIGHT - 40))
self.is_big = False
self.scale = 0.8
else:
self.position = (pos[0] + random.random() * 32 - 16,
pos[1] + random.random() * 32 - 16)
self.is_big = True
self.schedule_interval(self.update, random.random() * 0.2 + 0.1)
def update(self, dt):
arena = self.parent.parent
snake = arena.snake
self.check_kill(snake)
for s in arena.enemies:
self.check_kill(s)
def check_kill(self, snake):
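        # If a living snake comes within a 32-pixel box of this dot, mark it eaten
        # and animate it onto the snake before kill() removes it.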
if (not self.killed and not snake.is_dead) and (
abs(snake.x - self.x) < 32 and abs(snake.y - self.y) < 32
):
self.killed = True
self.killer = snake
self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))
|
normal
|
{
"blob_id": "be06a0ad22f4ae9ab4c0acea6a7c601c14a90fc4",
"index": 1995,
"step-1": "<mask token>\n\n\nclass Dot(Sprite):\n <mask token>\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Dot(Sprite):\n\n def __init__(self, pos=None, color=None):\n if color is None:\n color = random.choice(define.ALL_COLOR)\n super(Dot, self).__init__('circle.png', color=color)\n self.killed = False\n if pos is None:\n self.position = random.randint(40, define.WIDTH - 40\n ), random.randint(40, define.HEIGHT - 40)\n self.is_big = False\n self.scale = 0.8\n else:\n self.position = pos[0] + random.random() * 32 - 16, pos[1\n ] + random.random() * 32 - 16\n self.is_big = True\n self.schedule_interval(self.update, random.random() * 0.2 + 0.1)\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n\n def check_kill(self, snake):\n if (not self.killed and not snake.is_dead) and (abs(snake.x - self.\n x) < 32 and abs(snake.y - self.y) < 32):\n self.killed = True\n self.killer = snake\n self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))\n",
"step-3": "<mask token>\n\n\ndef kill(spr):\n spr.unschedule(spr.update)\n arena = spr.parent.parent\n if not spr.is_big:\n arena.batch.add(Dot())\n spr.killer.add_score()\n else:\n spr.killer.add_score(2)\n arena.batch.remove(spr)\n if not spr.killer.is_enemy:\n arena.parent.update_score()\n del spr\n\n\nclass Dot(Sprite):\n\n def __init__(self, pos=None, color=None):\n if color is None:\n color = random.choice(define.ALL_COLOR)\n super(Dot, self).__init__('circle.png', color=color)\n self.killed = False\n if pos is None:\n self.position = random.randint(40, define.WIDTH - 40\n ), random.randint(40, define.HEIGHT - 40)\n self.is_big = False\n self.scale = 0.8\n else:\n self.position = pos[0] + random.random() * 32 - 16, pos[1\n ] + random.random() * 32 - 16\n self.is_big = True\n self.schedule_interval(self.update, random.random() * 0.2 + 0.1)\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n\n def check_kill(self, snake):\n if (not self.killed and not snake.is_dead) and (abs(snake.x - self.\n x) < 32 and abs(snake.y - self.y) < 32):\n self.killed = True\n self.killer = snake\n self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))\n",
"step-4": "import random\nfrom cocos.actions import MoveTo, CallFuncS\nfrom cocos.sprite import Sprite\nimport define\n\n\ndef kill(spr):\n spr.unschedule(spr.update)\n arena = spr.parent.parent\n if not spr.is_big:\n arena.batch.add(Dot())\n spr.killer.add_score()\n else:\n spr.killer.add_score(2)\n arena.batch.remove(spr)\n if not spr.killer.is_enemy:\n arena.parent.update_score()\n del spr\n\n\nclass Dot(Sprite):\n\n def __init__(self, pos=None, color=None):\n if color is None:\n color = random.choice(define.ALL_COLOR)\n super(Dot, self).__init__('circle.png', color=color)\n self.killed = False\n if pos is None:\n self.position = random.randint(40, define.WIDTH - 40\n ), random.randint(40, define.HEIGHT - 40)\n self.is_big = False\n self.scale = 0.8\n else:\n self.position = pos[0] + random.random() * 32 - 16, pos[1\n ] + random.random() * 32 - 16\n self.is_big = True\n self.schedule_interval(self.update, random.random() * 0.2 + 0.1)\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n\n def check_kill(self, snake):\n if (not self.killed and not snake.is_dead) and (abs(snake.x - self.\n x) < 32 and abs(snake.y - self.y) < 32):\n self.killed = True\n self.killer = snake\n self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))\n",
"step-5": "# -*- coding: utf-8 -*-\nimport random\nfrom cocos.actions import MoveTo, CallFuncS\nfrom cocos.sprite import Sprite\n\nimport define\n\n\ndef kill(spr):\n spr.unschedule(spr.update)\n arena = spr.parent.parent\n if not spr.is_big:\n arena.batch.add(Dot())\n spr.killer.add_score()\n else:\n spr.killer.add_score(2)\n arena.batch.remove(spr)\n if not spr.killer.is_enemy:\n arena.parent.update_score()\n del spr\n\nclass Dot(Sprite):\n def __init__(self, pos=None, color=None):\n if color is None:\n color = random.choice(define.ALL_COLOR)\n\n super(Dot, self).__init__('circle.png', color=color)\n self.killed = False\n if pos is None:\n self.position = (random.randint(40, define.WIDTH - 40),\n random.randint(40, define.HEIGHT - 40))\n self.is_big = False\n self.scale = 0.8\n else:\n self.position = (pos[0] + random.random() * 32 - 16,\n pos[1] + random.random() * 32 - 16)\n self.is_big = True\n self.schedule_interval(self.update, random.random() * 0.2 + 0.1)\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n\n def check_kill(self, snake):\n if (not self.killed and not snake.is_dead) and (\n abs(snake.x - self.x) < 32 and abs(snake.y - self.y) < 32\n ):\n self.killed = True\n self.killer = snake\n self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
##########################################################################################
## Scene Classification ##
## Authors : Chris Andrew, Santhoshini Reddy, Nikath Yasmeen, Sai Hima, Sriya Ragini ##
################################################################### ##
## Description: This project was developed as part of the DIP course at IIIT Sri City ##
## All code is available for free usage for educational purposes ##
## Authors do not authorize commercial use of the source code ##
##########################################################################################
# The following module shuffles the data to enable 10-fold cross-validation analysis
################ Imports ################
from random import shuffle
################ Global ################
path = "data/"
filename = "data"
################ Source ################
# ------------------------------------
f = open(path+filename+".csv",'r')
data = list()
train_data = list()
train_class = list()
# ------------------------------------
for line in f:
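    # Each line holds comma-separated numeric feature values; the last value is the integer class label.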
l = line.strip()
l = l.split(',')
    l = list(map(float, l))  # materialize as a list so the row can be sliced later (needed on Python 3)
data.append(l)
# ------------------------------------
f.close()
# ------------------------------------
for i in range(100):
shuffle(data)
# ------------------------------------
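# Separate each shuffled row into its feature vector and class label.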
for l in data:
train_data.append(l[0:-1])
train_class.append(int(l[-1]))
# ------------------------------------
f = open(path+filename+"_r.csv",'w')
for i in range(len(train_data)):
for entry in train_data[i]:
f.write(str(entry)+',')
# ------------------------------------
f.write(str(train_class[i])+'\n')
# ------------------------------------
f.close()
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------------------------------
|
normal
|
{
"blob_id": "b8b20d6c977a6c1df6a592188c6e799f12da6a23",
"index": 9734,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in f:\n l = line.strip()\n l = l.split(',')\n l = map(float, l)\n data.append(l)\nf.close()\nfor i in range(100):\n shuffle(data)\nfor l in data:\n train_data.append(l[0:-1])\n train_class.append(int(l[-1]))\n<mask token>\nfor i in range(len(train_data)):\n for entry in train_data[i]:\n f.write(str(entry) + ',')\n f.write(str(train_class[i]) + '\\n')\nf.close()\n",
"step-3": "<mask token>\npath = 'data/'\nfilename = 'data'\nf = open(path + filename + '.csv', 'r')\ndata = list()\ntrain_data = list()\ntrain_class = list()\nfor line in f:\n l = line.strip()\n l = l.split(',')\n l = map(float, l)\n data.append(l)\nf.close()\nfor i in range(100):\n shuffle(data)\nfor l in data:\n train_data.append(l[0:-1])\n train_class.append(int(l[-1]))\nf = open(path + filename + '_r.csv', 'w')\nfor i in range(len(train_data)):\n for entry in train_data[i]:\n f.write(str(entry) + ',')\n f.write(str(train_class[i]) + '\\n')\nf.close()\n",
"step-4": "from random import shuffle\npath = 'data/'\nfilename = 'data'\nf = open(path + filename + '.csv', 'r')\ndata = list()\ntrain_data = list()\ntrain_class = list()\nfor line in f:\n l = line.strip()\n l = l.split(',')\n l = map(float, l)\n data.append(l)\nf.close()\nfor i in range(100):\n shuffle(data)\nfor l in data:\n train_data.append(l[0:-1])\n train_class.append(int(l[-1]))\nf = open(path + filename + '_r.csv', 'w')\nfor i in range(len(train_data)):\n for entry in train_data[i]:\n f.write(str(entry) + ',')\n f.write(str(train_class[i]) + '\\n')\nf.close()\n",
"step-5": "##########################################################################################\n## Scene Classification ##\n## Authors : Chris Andrew, Santhoshini Reddy, Nikath Yasmeen, Sai Hima, Sriya Ragini ##\n################################################################### ##\n## Description: This project was developed as part of the DIP course at IIIT Sri City ##\n## All code is available for free usage for educational purposes ##\n## Authors do not authorize commercial use of the source code ##\n##########################################################################################\n\n# The following module shuffles the data to enable 10 fold cross-validation analysis\n\n################ Imports ################\nfrom random import shuffle\n################ Global ################\npath = \"data/\"\nfilename = \"data\"\n################ Source ################\n# ------------------------------------\nf = open(path+filename+\".csv\",'r')\ndata = list()\ntrain_data = list()\ntrain_class = list()\n# ------------------------------------\nfor line in f:\n l = line.strip()\n l = l.split(',')\n l = map(float , l)\n data.append(l)\n # ------------------------------------\nf.close()\n# ------------------------------------\nfor i in range(100):\n shuffle(data)\n# ------------------------------------\nfor l in data:\n train_data.append(l[0:-1])\n train_class.append(int(l[-1]))\n# ------------------------------------\nf = open(path+filename+\"_r.csv\",'w')\nfor i in range(len(train_data)):\n for entry in train_data[i]:\n f.write(str(entry)+',')\n # ------------------------------------\n f.write(str(train_class[i])+'\\n')\n # ------------------------------------\nf.close()\n# ------------------------------------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------------------------------------\n# ------------------------------------------------------------------------------------------------------------------------------------------\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Implements a Neural Network
"""
from vectorflux import VectorFlux
from mnist import read, show, normalize
from vectorflux.layers import Dense
from vectorflux.layers.Dropout import Dropout
train = list(read('train'))
test = list(read('test'))
print("Train size: {}".format(len(train)))
print("Test size: {}".format(len(test)))
# Normalization for values
test_x, test_y = normalize(test)
train_x, train_y = normalize(train)
vf = VectorFlux()
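# Stack: two 800-unit sigmoid hidden layers (Momentum and ADAM optimizers) with dropout
# between them, then a 10-way sigmoid output over the MNIST digit classes.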
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y, epochs=100000, alpha=0.001, mini_batch_size=100)
|
normal
|
{
"blob_id": "94d296b5a13bfa59dba5812da31707f9db9080af",
"index": 1292,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Train size: {}'.format(len(train)))\nprint('Test size: {}'.format(len(test)))\n<mask token>\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\nvf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,\n epochs=100000, alpha=0.001, mini_batch_size=100)\n",
"step-3": "<mask token>\ntrain = list(read('train'))\ntest = list(read('test'))\nprint('Train size: {}'.format(len(train)))\nprint('Test size: {}'.format(len(test)))\ntest_x, test_y = normalize(test)\ntrain_x, train_y = normalize(train)\nvf = VectorFlux()\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\nvf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,\n epochs=100000, alpha=0.001, mini_batch_size=100)\n",
"step-4": "<mask token>\nfrom vectorflux import VectorFlux\nfrom mnist import read, show, normalize\nfrom vectorflux.layers import Dense\nfrom vectorflux.layers.Dropout import Dropout\ntrain = list(read('train'))\ntest = list(read('test'))\nprint('Train size: {}'.format(len(train)))\nprint('Test size: {}'.format(len(test)))\ntest_x, test_y = normalize(test)\ntrain_x, train_y = normalize(train)\nvf = VectorFlux()\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\nvf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,\n epochs=100000, alpha=0.001, mini_batch_size=100)\n",
"step-5": "\"\"\"\nImplements a Neural Network\n\n\"\"\"\nfrom vectorflux import VectorFlux\nfrom mnist import read, show, normalize\n\nfrom vectorflux.layers import Dense\nfrom vectorflux.layers.Dropout import Dropout\n\ntrain = list(read('train'))\ntest = list(read('test'))\n\nprint(\"Train size: {}\".format(len(train)))\nprint(\"Test size: {}\".format(len(test)))\n\n# Normalization for values\ntest_x, test_y = normalize(test)\ntrain_x, train_y = normalize(train)\n\nvf = VectorFlux()\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\n\nvf.train(x_train = train_x, y_train = train_y, x_test=test_x, y_test = test_y, epochs=100000, alpha=0.001, mini_batch_size=100)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from PyQt5.QtWidgets import *
from select_substituents_table import *
from save_selection_dialog import *
class SelectSubsDialog(QDialog):
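    # Dialog for picking substituent groups for a given R-group; a selection can be
    # confirmed directly or saved as a named, reusable set.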
def __init__(self, r_group):
super().__init__()
self.r_group = r_group
self.substituents = None
self.new_set_saved = False
self.setWindowTitle(f"Select Substituents for {self.r_group}")
self.instructions_label = QLabel("Click row heading to select functional group set. Ctrl + click or Shift + click to select multiple items. Double click functional group name to view SMILES.")
self.select_subs_table = SelectSubsTable()
self.confirm_button = QPushButton("Confirm Selection")
self.confirm_button.setEnabled(False)
self.save_as_set_button = QPushButton("Save Selection as Set")
self.save_as_set_button.setEnabled(False)
self.cancel_button = QPushButton("Cancel")
self.select_subs_button_layout = QHBoxLayout()
self.select_subs_button_layout.addWidget(self.confirm_button)
self.select_subs_button_layout.addWidget(self.save_as_set_button)
self.select_subs_button_layout.addWidget(self.cancel_button)
self.select_subs_layout = QVBoxLayout()
self.select_subs_layout.addWidget(self.instructions_label)
self.select_subs_layout.addWidget(self.select_subs_table)
self.select_subs_layout.addLayout(self.select_subs_button_layout)
self.setLayout(self.select_subs_layout)
self.select_subs_table.itemSelectionChanged.connect(self.enable_save_buttons)
self.confirm_button.clicked.connect(self.save_substituents)
self.save_as_set_button.clicked.connect(self.save_selection)
self.cancel_button.clicked.connect(self.close)
def enable_save_buttons(self):
self.confirm_button.setEnabled(True)
self.save_as_set_button.setEnabled(True)
def get_substituents(self):
self.substituents = list(dict.fromkeys([item.text() for item in self.select_subs_table.selectedItems()]))
def save_substituents(self):
self.get_substituents()
self.close()
def save_selection(self):
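        # Open the save dialog so the current selection can be stored as a named, reusable set.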
self.get_substituents()
save_selection_dialog = SaveSelectionDialog(self.substituents)
save_selection_dialog.exec_()
if save_selection_dialog.new_set_saved:
self.new_set_saved = True
self.close()
class SelectSubsForNewSetDialog(SelectSubsDialog):
def __init__(self):
super().__init__(r_group = "New Set")
self.confirm_button.setVisible(False)
class SelectSubsEditSetDialog(SelectSubsDialog):
def __init__(self, set_name):
        super().__init__(r_group=None)
self.set_name = set_name
self.setWindowTitle(f"Select Groups for {self.set_name}")
self.save_as_set_button.setVisible(False)
|
normal
|
{
"blob_id": "849db3a92e0544661dd465b3e7f6949f8de5633b",
"index": 5099,
"step-1": "<mask token>\n\n\nclass SelectSubsDialog(QDialog):\n <mask token>\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in\n self.select_subs_table.selectedItems()]))\n <mask token>\n <mask token>\n\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group='New Set')\n self.confirm_button.setVisible(False)\n\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group=None)\n self.set_name = set_name\n self.setWindowTitle(f'Select Groups for {self.set_name}')\n self.save_as_set_button.setVisible(False)\n",
"step-2": "<mask token>\n\n\nclass SelectSubsDialog(QDialog):\n <mask token>\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in\n self.select_subs_table.selectedItems()]))\n\n def save_substituents(self):\n self.get_substituents()\n self.close()\n <mask token>\n\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group='New Set')\n self.confirm_button.setVisible(False)\n\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group=None)\n self.set_name = set_name\n self.setWindowTitle(f'Select Groups for {self.set_name}')\n self.save_as_set_button.setVisible(False)\n",
"step-3": "<mask token>\n\n\nclass SelectSubsDialog(QDialog):\n\n def __init__(self, r_group):\n super().__init__()\n self.r_group = r_group\n self.substituents = None\n self.new_set_saved = False\n self.setWindowTitle(f'Select Substituents for {self.r_group}')\n self.instructions_label = QLabel(\n 'Click row heading to select functional group set. Ctrl + click or Shift + click to select multiple items. Double click functional group name to view SMILES.'\n )\n self.select_subs_table = SelectSubsTable()\n self.confirm_button = QPushButton('Confirm Selection')\n self.confirm_button.setEnabled(False)\n self.save_as_set_button = QPushButton('Save Selection as Set')\n self.save_as_set_button.setEnabled(False)\n self.cancel_button = QPushButton('Cancel')\n self.select_subs_button_layout = QHBoxLayout()\n self.select_subs_button_layout.addWidget(self.confirm_button)\n self.select_subs_button_layout.addWidget(self.save_as_set_button)\n self.select_subs_button_layout.addWidget(self.cancel_button)\n self.select_subs_layout = QVBoxLayout()\n self.select_subs_layout.addWidget(self.instructions_label)\n self.select_subs_layout.addWidget(self.select_subs_table)\n self.select_subs_layout.addLayout(self.select_subs_button_layout)\n self.setLayout(self.select_subs_layout)\n self.select_subs_table.itemSelectionChanged.connect(self.\n enable_save_buttons)\n self.confirm_button.clicked.connect(self.save_substituents)\n self.save_as_set_button.clicked.connect(self.save_selection)\n self.cancel_button.clicked.connect(self.close)\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in\n self.select_subs_table.selectedItems()]))\n\n def save_substituents(self):\n self.get_substituents()\n self.close()\n <mask token>\n\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group='New Set')\n self.confirm_button.setVisible(False)\n\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group=None)\n self.set_name = set_name\n self.setWindowTitle(f'Select Groups for {self.set_name}')\n self.save_as_set_button.setVisible(False)\n",
"step-4": "from PyQt5.QtWidgets import *\nfrom select_substituents_table import *\nfrom save_selection_dialog import *\n\n\nclass SelectSubsDialog(QDialog):\n\n def __init__(self, r_group):\n super().__init__()\n self.r_group = r_group\n self.substituents = None\n self.new_set_saved = False\n self.setWindowTitle(f'Select Substituents for {self.r_group}')\n self.instructions_label = QLabel(\n 'Click row heading to select functional group set. Ctrl + click or Shift + click to select multiple items. Double click functional group name to view SMILES.'\n )\n self.select_subs_table = SelectSubsTable()\n self.confirm_button = QPushButton('Confirm Selection')\n self.confirm_button.setEnabled(False)\n self.save_as_set_button = QPushButton('Save Selection as Set')\n self.save_as_set_button.setEnabled(False)\n self.cancel_button = QPushButton('Cancel')\n self.select_subs_button_layout = QHBoxLayout()\n self.select_subs_button_layout.addWidget(self.confirm_button)\n self.select_subs_button_layout.addWidget(self.save_as_set_button)\n self.select_subs_button_layout.addWidget(self.cancel_button)\n self.select_subs_layout = QVBoxLayout()\n self.select_subs_layout.addWidget(self.instructions_label)\n self.select_subs_layout.addWidget(self.select_subs_table)\n self.select_subs_layout.addLayout(self.select_subs_button_layout)\n self.setLayout(self.select_subs_layout)\n self.select_subs_table.itemSelectionChanged.connect(self.\n enable_save_buttons)\n self.confirm_button.clicked.connect(self.save_substituents)\n self.save_as_set_button.clicked.connect(self.save_selection)\n self.cancel_button.clicked.connect(self.close)\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in\n self.select_subs_table.selectedItems()]))\n\n def save_substituents(self):\n self.get_substituents()\n self.close()\n\n def save_selection(self):\n self.get_substituents()\n save_selection_dialog = SaveSelectionDialog(self.substituents)\n save_selection_dialog.exec_()\n if save_selection_dialog.new_set_saved:\n self.new_set_saved = True\n self.close()\n\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group='New Set')\n self.confirm_button.setVisible(False)\n\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group=None)\n self.set_name = set_name\n self.setWindowTitle(f'Select Groups for {self.set_name}')\n self.save_as_set_button.setVisible(False)\n",
"step-5": "from PyQt5.QtWidgets import *\n\nfrom select_substituents_table import *\nfrom save_selection_dialog import *\n\nclass SelectSubsDialog(QDialog):\n\n def __init__(self, r_group):\n super().__init__()\n self.r_group = r_group\n self.substituents = None\n self.new_set_saved = False\n\n self.setWindowTitle(f\"Select Substituents for {self.r_group}\")\n\n self.instructions_label = QLabel(\"Click row heading to select functional group set. Ctrl + click or Shift + click to select multiple items. Double click functional group name to view SMILES.\")\n\n self.select_subs_table = SelectSubsTable()\n\n self.confirm_button = QPushButton(\"Confirm Selection\")\n self.confirm_button.setEnabled(False)\n self.save_as_set_button = QPushButton(\"Save Selection as Set\")\n self.save_as_set_button.setEnabled(False)\n self.cancel_button = QPushButton(\"Cancel\")\n\n self.select_subs_button_layout = QHBoxLayout()\n self.select_subs_button_layout.addWidget(self.confirm_button)\n self.select_subs_button_layout.addWidget(self.save_as_set_button)\n self.select_subs_button_layout.addWidget(self.cancel_button)\n\n self.select_subs_layout = QVBoxLayout()\n self.select_subs_layout.addWidget(self.instructions_label)\n self.select_subs_layout.addWidget(self.select_subs_table)\n self.select_subs_layout.addLayout(self.select_subs_button_layout)\n self.setLayout(self.select_subs_layout)\n\n self.select_subs_table.itemSelectionChanged.connect(self.enable_save_buttons)\n self.confirm_button.clicked.connect(self.save_substituents)\n self.save_as_set_button.clicked.connect(self.save_selection)\n self.cancel_button.clicked.connect(self.close)\n\n def enable_save_buttons(self):\n self.confirm_button.setEnabled(True)\n self.save_as_set_button.setEnabled(True)\n\n def get_substituents(self):\n self.substituents = list(dict.fromkeys([item.text() for item in self.select_subs_table.selectedItems()]))\n \n def save_substituents(self):\n self.get_substituents()\n self.close()\n\n def save_selection(self):\n self.get_substituents()\n save_selection_dialog = SaveSelectionDialog(self.substituents)\n save_selection_dialog.exec_()\n if save_selection_dialog.new_set_saved:\n self.new_set_saved = True\n self.close()\n\nclass SelectSubsForNewSetDialog(SelectSubsDialog):\n\n def __init__(self):\n super().__init__(r_group = \"New Set\")\n\n self.confirm_button.setVisible(False)\n\nclass SelectSubsEditSetDialog(SelectSubsDialog):\n\n def __init__(self, set_name):\n super().__init__(r_group = None)\n self.set_name = set_name\n \n self.setWindowTitle(f\"Select Groups for {self.set_name}\")\n\n self.save_as_set_button.setVisible(False)\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
7,
8,
9,
11,
12
]
}
|
[
7,
8,
9,
11,
12
] |
"""
Merkle: Implementation of Merkle Trees over Blake2
"""
from typing import List, Any
from hashlib import blake2b
class Merkle:
"""
We consider the merkle tree as a commitment protocol implementing
the interface:
* commit_() : commits to a list by computing the merkle tree.
    * open_() : opens the commitment by computing the authentication path.
    * verify_() : verifies that a value is committed by checking that it is a leaf.
"""
H = blake2b
def commit_(leafs):
assert len(leafs) & (len(leafs) - 1) == 0, "List must be of a power two length"
if len(leafs) == 1:
return leafs[0]
return Merkle.H(
Merkle.commit_(leafs[: (len(leafs) // 2)])
+ Merkle.commit_(leafs[(len(leafs) // 2) :])
).digest()
def open_(index, leafs):
assert len(leafs) & (len(leafs) - 1) == 0, "List must be of a power two length"
assert 0 <= index and index < len(leafs)
if len(leafs) == 2:
return [leafs[1 - index]]
elif index < (len(leafs) / 2):
return Merkle.open_(index, leafs[: (len(leafs) // 2)]) + [
Merkle.commit_(leafs[(len(leafs) // 2) :])
]
else:
return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) // 2 :]) + [
Merkle.commit_(leafs[: len(leafs) // 2])
]
def verify_(root, index, path, leaf):
assert 0 <= index and index < (1 << len(path)), "cannot verify invalid index"
if len(path) == 1:
if index == 0:
return root == Merkle.H(leaf + path[0]).digest()
else:
return root == Merkle.H(path[0] + leaf).digest()
else:
if index % 2 == 0:
return Merkle.verify_(
root, index >> 1, path[1:], Merkle.H(leaf + path[0]).digest()
)
else:
return Merkle.verify_(
root, index >> 1, path[1:], Merkle.H(path[0] + leaf).digest()
)
# The following functions expose the API and compute hashes of leafs before
# calling the underlying code.
def commit(leafs: List[Any]):
return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in leafs])
def open(index: int, leafs: List[Any]):
return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in leafs])
def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]):
return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest())
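# Illustrative usage sketch (added; not part of the original module): commit to
# a power-of-two-length list of small integers, open one index, and verify the
# authentication path against the root. Leaf values go through bytes() exactly
# as the wrapper functions above expect.
if __name__ == "__main__":
    leafs = [3, 1, 4, 1, 5, 9, 2, 6]  # length must be a power of two
    root = Merkle.commit(leafs)
    index = 5
    path = Merkle.open(index, leafs)
    assert Merkle.verify(root, index, path, leafs[index])
    # A tampered leaf must fail verification against the same root and path.
    assert not Merkle.verify(root, index, path, leafs[index] + 1)
    print("merkle root:", root.hex())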
|
normal
|
{
"blob_id": "547926904f9a4b88a988e3b59c49b94fe0e30de4",
"index": 1955,
"step-1": "<mask token>\n\n\nclass Merkle:\n <mask token>\n <mask token>\n\n def commit_(leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(Merkle.commit_(leafs[:len(leafs) // 2]) + Merkle.\n commit_(leafs[len(leafs) // 2:])).digest()\n\n def open_(index, leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < len(leafs) / 2:\n return Merkle.open_(index, leafs[:len(leafs) // 2]) + [Merkle.\n commit_(leafs[len(leafs) // 2:])]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) //\n 2:]) + [Merkle.commit_(leafs[:len(leafs) // 2])]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < 1 << len(path\n ), 'cannot verify invalid index'\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n elif index % 2 == 0:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(leaf +\n path[0]).digest())\n else:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(path\n [0] + leaf).digest())\n\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n <mask token>\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]\n ):\n return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest()\n )\n",
"step-2": "<mask token>\n\n\nclass Merkle:\n <mask token>\n <mask token>\n\n def commit_(leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(Merkle.commit_(leafs[:len(leafs) // 2]) + Merkle.\n commit_(leafs[len(leafs) // 2:])).digest()\n\n def open_(index, leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < len(leafs) / 2:\n return Merkle.open_(index, leafs[:len(leafs) // 2]) + [Merkle.\n commit_(leafs[len(leafs) // 2:])]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) //\n 2:]) + [Merkle.commit_(leafs[:len(leafs) // 2])]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < 1 << len(path\n ), 'cannot verify invalid index'\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n elif index % 2 == 0:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(leaf +\n path[0]).digest())\n else:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(path\n [0] + leaf).digest())\n\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def open(index: int, leafs: List[Any]):\n return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]\n ):\n return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest()\n )\n",
"step-3": "<mask token>\n\n\nclass Merkle:\n \"\"\"\n We consider the merkle tree as a commitment protocol implementing\n the interface:\n * commit_() : commits to a list by computing the merkle tree.\n * open_() : opens the commitment by computing the authentification path.\n * verify_() : verify that a value is commited by checking that its a leaf.\n \"\"\"\n H = blake2b\n\n def commit_(leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(Merkle.commit_(leafs[:len(leafs) // 2]) + Merkle.\n commit_(leafs[len(leafs) // 2:])).digest()\n\n def open_(index, leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < len(leafs) / 2:\n return Merkle.open_(index, leafs[:len(leafs) // 2]) + [Merkle.\n commit_(leafs[len(leafs) // 2:])]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) //\n 2:]) + [Merkle.commit_(leafs[:len(leafs) // 2])]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < 1 << len(path\n ), 'cannot verify invalid index'\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n elif index % 2 == 0:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(leaf +\n path[0]).digest())\n else:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(path\n [0] + leaf).digest())\n\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def open(index: int, leafs: List[Any]):\n return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]\n ):\n return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest()\n )\n",
"step-4": "<mask token>\nfrom typing import List, Any\nfrom hashlib import blake2b\n\n\nclass Merkle:\n \"\"\"\n We consider the merkle tree as a commitment protocol implementing\n the interface:\n * commit_() : commits to a list by computing the merkle tree.\n * open_() : opens the commitment by computing the authentification path.\n * verify_() : verify that a value is commited by checking that its a leaf.\n \"\"\"\n H = blake2b\n\n def commit_(leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(Merkle.commit_(leafs[:len(leafs) // 2]) + Merkle.\n commit_(leafs[len(leafs) // 2:])).digest()\n\n def open_(index, leafs):\n assert len(leafs) & len(leafs\n ) - 1 == 0, 'List must be of a power two length'\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < len(leafs) / 2:\n return Merkle.open_(index, leafs[:len(leafs) // 2]) + [Merkle.\n commit_(leafs[len(leafs) // 2:])]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) //\n 2:]) + [Merkle.commit_(leafs[:len(leafs) // 2])]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < 1 << len(path\n ), 'cannot verify invalid index'\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n elif index % 2 == 0:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(leaf +\n path[0]).digest())\n else:\n return Merkle.verify_(root, index >> 1, path[1:], Merkle.H(path\n [0] + leaf).digest())\n\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def open(index: int, leafs: List[Any]):\n return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in\n leafs])\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]\n ):\n return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest()\n )\n",
"step-5": "\"\"\"\nMerkle: Implementation of Merkle Trees over Blake2\n\"\"\"\nfrom typing import List, Any\nfrom hashlib import blake2b\n\n\nclass Merkle:\n \"\"\"\n We consider the merkle tree as a commitment protocol implementing\n the interface:\n * commit_() : commits to a list by computing the merkle tree.\n * open_() : opens the commitment by computing the authentification path.\n * verify_() : verify that a value is commited by checking that its a leaf.\n \"\"\"\n\n H = blake2b\n\n def commit_(leafs):\n assert len(leafs) & (len(leafs) - 1) == 0, \"List must be of a power two length\"\n if len(leafs) == 1:\n return leafs[0]\n return Merkle.H(\n Merkle.commit_(leafs[: (len(leafs) // 2)])\n + Merkle.commit_(leafs[(len(leafs) // 2) :])\n ).digest()\n\n def open_(index, leafs):\n assert len(leafs) & (len(leafs) - 1) == 0, \"List must be of a power two length\"\n assert 0 <= index and index < len(leafs)\n if len(leafs) == 2:\n return [leafs[1 - index]]\n elif index < (len(leafs) / 2):\n return Merkle.open_(index, leafs[: (len(leafs) // 2)]) + [\n Merkle.commit_(leafs[(len(leafs) // 2) :])\n ]\n else:\n return Merkle.open_(index - len(leafs) // 2, leafs[len(leafs) // 2 :]) + [\n Merkle.commit_(leafs[: len(leafs) // 2])\n ]\n\n def verify_(root, index, path, leaf):\n assert 0 <= index and index < (1 << len(path)), \"cannot verify invalid index\"\n if len(path) == 1:\n if index == 0:\n return root == Merkle.H(leaf + path[0]).digest()\n else:\n return root == Merkle.H(path[0] + leaf).digest()\n else:\n if index % 2 == 0:\n return Merkle.verify_(\n root, index >> 1, path[1:], Merkle.H(leaf + path[0]).digest()\n )\n else:\n return Merkle.verify_(\n root, index >> 1, path[1:], Merkle.H(path[0] + leaf).digest()\n )\n\n # The following functions expose the API and compute hashes of leafs before\n # calling the underlying code.\n def commit(leafs: List[Any]):\n return Merkle.commit_([Merkle.H(bytes(leaf)).digest() for leaf in leafs])\n\n def open(index: int, leafs: List[Any]):\n return Merkle.open_(index, [Merkle.H(bytes(leaf)).digest() for leaf in leafs])\n\n def verify(root: bytes, index: int, path: List[List[Any]], leaf: List[Any]):\n return Merkle.verify_(root, index, path, Merkle.H(bytes(leaf)).digest())\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
class CustomPrinter(object):
def __init__(self, val):
self.val = val
def to_string(self):
res = "{"
for m in xrange(64):
res += hex(int(self.val[m]))
if m != 63:
res += ", "
res += " }"
return res
def lookup_type(val):
if str(val.type) == 'unsigned char [64]':
return CustomPrinter(val)
return None
gdb.pretty_printers.append(lookup_type)
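# Usage note (added; not part of the original script): gdb's Python API is only
# available inside a gdb process, so a file like this is loaded with
# `source custom_printer.py` at the gdb prompt or from a .gdbinit file (the file
# name here is illustrative). Once registered, any value whose type prints as
# 'unsigned char [64]' is shown as a brace-enclosed list of hex bytes. Note that
# xrange exists only under Python 2; a gdb built against Python 3 would need
# range() instead.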
|
normal
|
{
"blob_id": "4d5b2ed016cfc6740c3ee5397c894fabc1bec73f",
"index": 6963,
"step-1": "class CustomPrinter(object):\n <mask token>\n\n def to_string(self):\n res = '{'\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += ', '\n res += ' }'\n return res\n\n\n<mask token>\n",
"step-2": "class CustomPrinter(object):\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n res = '{'\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += ', '\n res += ' }'\n return res\n\n\n<mask token>\n",
"step-3": "class CustomPrinter(object):\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n res = '{'\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += ', '\n res += ' }'\n return res\n\n\ndef lookup_type(val):\n if str(val.type) == 'unsigned char [64]':\n return CustomPrinter(val)\n return None\n\n\n<mask token>\n",
"step-4": "class CustomPrinter(object):\n\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n res = '{'\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += ', '\n res += ' }'\n return res\n\n\ndef lookup_type(val):\n if str(val.type) == 'unsigned char [64]':\n return CustomPrinter(val)\n return None\n\n\ngdb.pretty_printers.append(lookup_type)\n",
"step-5": "class CustomPrinter(object):\n def __init__(self, val):\n self.val = val\n\n def to_string(self):\n res = \"{\"\n for m in xrange(64):\n res += hex(int(self.val[m]))\n if m != 63:\n res += \", \"\n res += \" }\"\n return res\n\n\ndef lookup_type(val):\n if str(val.type) == 'unsigned char [64]':\n return CustomPrinter(val)\n return None\n\n\ngdb.pretty_printers.append(lookup_type)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from asteroidhunter import __version__
import unittest, requests, json, os, pytest
from dotenv import load_dotenv
load_dotenv()
from asteroidhunter.asteroid_closest_approach import asteroid_closest_approach
def test_version():
assert __version__ == '0.1.0'
@pytest.mark.vcr()
def test_asteroid_closest_approach():
asteroid_json = asteroid_closest_approach(25)
asteroids = json.loads(asteroid_json)
for i in range(0, len(asteroids) - 1):
assert asteroids[i]['close_approach_date']
assert asteroids[i]['close_approach_date_full']
assert asteroids[i]['epoch_date_close_approach']
assert asteroids[i]['miss_distance']
assert asteroids[i]['orbiting_body']
assert asteroids[i]
assert type(asteroids[i]) is dict
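# Usage note (added; not part of the original tests, file name illustrative):
# the vcr marker comes from a cassette-recording pytest plugin, so depending on
# its record mode the first run may need network access (plus whatever API key
# load_dotenv is expected to supply) to record responses, while later runs
# replay the stored cassette, e.g.
#   pytest test_asteroid_closest_approach.py -q
# Also note that range(0, len(asteroids) - 1) leaves the final element of the
# returned list unchecked.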
|
normal
|
{
"blob_id": "7dd4dc60b23c72ba450025bececb0e6d89df69c3",
"index": 8263,
"step-1": "<mask token>\n\n\ndef test_version():\n assert __version__ == '0.1.0'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_version():\n assert __version__ == '0.1.0'\n\n\[email protected]()\ndef test_asteroid_closest_approach():\n asteroid_json = asteroid_closest_approach(25)\n asteroids = json.loads(asteroid_json)\n for i in range(0, len(asteroids) - 1):\n assert asteroids[i]['close_approach_date']\n assert asteroids[i]['close_approach_date_full']\n assert asteroids[i]['epoch_date_close_approach']\n assert asteroids[i]['miss_distance']\n assert asteroids[i]['orbiting_body']\n assert asteroids[i]\n assert type(asteroids[i]) is dict\n",
"step-3": "<mask token>\nload_dotenv()\n<mask token>\n\n\ndef test_version():\n assert __version__ == '0.1.0'\n\n\[email protected]()\ndef test_asteroid_closest_approach():\n asteroid_json = asteroid_closest_approach(25)\n asteroids = json.loads(asteroid_json)\n for i in range(0, len(asteroids) - 1):\n assert asteroids[i]['close_approach_date']\n assert asteroids[i]['close_approach_date_full']\n assert asteroids[i]['epoch_date_close_approach']\n assert asteroids[i]['miss_distance']\n assert asteroids[i]['orbiting_body']\n assert asteroids[i]\n assert type(asteroids[i]) is dict\n",
"step-4": "from asteroidhunter import __version__\nimport unittest, requests, json, os, pytest\nfrom dotenv import load_dotenv\nload_dotenv()\nfrom asteroidhunter.asteroid_closest_approach import asteroid_closest_approach\n\n\ndef test_version():\n assert __version__ == '0.1.0'\n\n\[email protected]()\ndef test_asteroid_closest_approach():\n asteroid_json = asteroid_closest_approach(25)\n asteroids = json.loads(asteroid_json)\n for i in range(0, len(asteroids) - 1):\n assert asteroids[i]['close_approach_date']\n assert asteroids[i]['close_approach_date_full']\n assert asteroids[i]['epoch_date_close_approach']\n assert asteroids[i]['miss_distance']\n assert asteroids[i]['orbiting_body']\n assert asteroids[i]\n assert type(asteroids[i]) is dict\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import numpy as np
import math
class KMeans(object):
def __init__(self, data, option):
self.data = data
self.membership = None
self.centroids = None
self.option = option
self.temp_data = None
def fit(self, K):
data = np.asmatrix(self.data[0])
if self.option == 2:
self.data[:, 2] = np.log(data[:, 2])
self.data[:, 3] = np.log(data[:, 3])
elif self.option == 3:
for j in range(self.data.shape[1]):
self.data[:, j] -= np.mean(self.data[:, j])
self.data[:, j] /= np.std(self.data[:, j])
elif self.option == 5:
self.temp_data = self.data
np.random.shuffle(self.data)
self.data = self.data[0:int(self.data.shape[0]*.06), :]
centroids = self.data[np.random.choice(self.data.shape[0], K, replace=False), :]
membership = np.zeros(self.data.shape[0]).astype(int)
centroids_temp = None
while not np.array_equal(centroids_temp, centroids):
centroids_temp = np.copy(centroids)
for i, d in enumerate(self.data):
if self.option == 4:
membership[i] = np.argmin(np.array([np.abs(d - c).sum() for c in centroids]))
else:
membership[i] = np.argmin(np.array([np.sqrt(((d - c) ** 2).sum()) for c in centroids]))
for i in range(centroids.shape[0]):
centroids[i] = self.data[membership == i].mean(axis=0)
self.centroids = np.copy(centroids)
self.membership = np.copy(membership)
if self.option == 5:
self.data = self.temp_data
self.membership = np.zeros(self.data.shape[0]).astype(int)
for i, d in enumerate(self.data):
self.membership[i] = np.argmin(np.array([np.sqrt(((d - c) ** 2).sum()) for c in centroids]))
def predict(self):
error = 0
for i, c in enumerate(self.centroids):
subset = self.data[self.membership == i]
for i, d in enumerate(subset):
error += ((d - c) ** 2).sum()
return error
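# Minimal usage sketch (added; not part of the original class): fit two
# well-separated 2-D Gaussian blobs. Any option value outside 2-5 falls through
# to plain Euclidean distance with no preprocessing, so option=1 keeps the
# example independent of the column-specific transforms above.
if __name__ == "__main__":
    np.random.seed(0)
    blob_a = np.random.randn(100, 2) + np.array([5.0, 5.0])
    blob_b = np.random.randn(100, 2) - np.array([5.0, 5.0])
    points = np.vstack((blob_a, blob_b))
    km = KMeans(points, option=1)
    km.fit(K=2)
    print("centroids:\n", km.centroids)
    print("within-cluster squared error:", km.predict())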
|
normal
|
{
"blob_id": "5cf73e003b744b438c0db67ab39fb10a3f879f2f",
"index": 8556,
"step-1": "<mask token>\n\n\nclass KMeans(object):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass KMeans(object):\n\n def __init__(self, data, option):\n self.data = data\n self.membership = None\n self.centroids = None\n self.option = option\n self.temp_data = None\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass KMeans(object):\n\n def __init__(self, data, option):\n self.data = data\n self.membership = None\n self.centroids = None\n self.option = option\n self.temp_data = None\n\n def fit(self, K):\n data = np.asmatrix(self.data[0])\n if self.option == 2:\n self.data[:, 2] = np.log(data[:, 2])\n self.data[:, 3] = np.log(data[:, 3])\n elif self.option == 3:\n for j in range(self.data.shape[1]):\n self.data[:, j] -= np.mean(self.data[:, j])\n self.data[:, j] /= np.std(self.data[:, j])\n elif self.option == 5:\n self.temp_data = self.data\n np.random.shuffle(self.data)\n self.data = self.data[0:int(self.data.shape[0] * 0.06), :]\n centroids = self.data[np.random.choice(self.data.shape[0], K,\n replace=False), :]\n membership = np.zeros(self.data.shape[0]).astype(int)\n centroids_temp = None\n while not np.array_equal(centroids_temp, centroids):\n centroids_temp = np.copy(centroids)\n for i, d in enumerate(self.data):\n if self.option == 4:\n membership[i] = np.argmin(np.array([np.abs(d - c).sum() for\n c in centroids]))\n else:\n membership[i] = np.argmin(np.array([np.sqrt(((d - c) **\n 2).sum()) for c in centroids]))\n for i in range(centroids.shape[0]):\n centroids[i] = self.data[membership == i].mean(axis=0)\n self.centroids = np.copy(centroids)\n self.membership = np.copy(membership)\n if self.option == 5:\n self.data = self.temp_data\n self.membership = np.zeros(self.data.shape[0]).astype(int)\n for i, d in enumerate(self.data):\n self.membership[i] = np.argmin(np.array([np.sqrt(((d - c) **\n 2).sum()) for c in centroids]))\n\n def predict(self):\n error = 0\n for i, c in enumerate(self.centroids):\n subset = self.data[self.membership == i]\n for i, d in enumerate(subset):\n error += ((d - c) ** 2).sum()\n return error\n",
"step-4": "import numpy as np\nimport math\n\n\nclass KMeans(object):\n\n def __init__(self, data, option):\n self.data = data\n self.membership = None\n self.centroids = None\n self.option = option\n self.temp_data = None\n\n def fit(self, K):\n data = np.asmatrix(self.data[0])\n if self.option == 2:\n self.data[:, 2] = np.log(data[:, 2])\n self.data[:, 3] = np.log(data[:, 3])\n elif self.option == 3:\n for j in range(self.data.shape[1]):\n self.data[:, j] -= np.mean(self.data[:, j])\n self.data[:, j] /= np.std(self.data[:, j])\n elif self.option == 5:\n self.temp_data = self.data\n np.random.shuffle(self.data)\n self.data = self.data[0:int(self.data.shape[0] * 0.06), :]\n centroids = self.data[np.random.choice(self.data.shape[0], K,\n replace=False), :]\n membership = np.zeros(self.data.shape[0]).astype(int)\n centroids_temp = None\n while not np.array_equal(centroids_temp, centroids):\n centroids_temp = np.copy(centroids)\n for i, d in enumerate(self.data):\n if self.option == 4:\n membership[i] = np.argmin(np.array([np.abs(d - c).sum() for\n c in centroids]))\n else:\n membership[i] = np.argmin(np.array([np.sqrt(((d - c) **\n 2).sum()) for c in centroids]))\n for i in range(centroids.shape[0]):\n centroids[i] = self.data[membership == i].mean(axis=0)\n self.centroids = np.copy(centroids)\n self.membership = np.copy(membership)\n if self.option == 5:\n self.data = self.temp_data\n self.membership = np.zeros(self.data.shape[0]).astype(int)\n for i, d in enumerate(self.data):\n self.membership[i] = np.argmin(np.array([np.sqrt(((d - c) **\n 2).sum()) for c in centroids]))\n\n def predict(self):\n error = 0\n for i, c in enumerate(self.centroids):\n subset = self.data[self.membership == i]\n for i, d in enumerate(subset):\n error += ((d - c) ** 2).sum()\n return error\n",
"step-5": "import numpy as np\nimport math\n\n\nclass KMeans(object):\n\n def __init__(self, data, option):\n self.data = data\n self.membership = None\n self.centroids = None\n self.option = option\n self.temp_data = None\n\n def fit(self, K):\n data = np.asmatrix(self.data[0])\n if self.option == 2:\n self.data[:, 2] = np.log(data[:, 2])\n self.data[:, 3] = np.log(data[:, 3])\n elif self.option == 3:\n for j in range(self.data.shape[1]):\n self.data[:, j] -= np.mean(self.data[:, j])\n self.data[:, j] /= np.std(self.data[:, j])\n elif self.option == 5:\n self.temp_data = self.data\n np.random.shuffle(self.data)\n self.data = self.data[0:int(self.data.shape[0]*.06), :]\n\n centroids = self.data[np.random.choice(self.data.shape[0], K, replace=False), :]\n membership = np.zeros(self.data.shape[0]).astype(int)\n centroids_temp = None\n while not np.array_equal(centroids_temp, centroids):\n centroids_temp = np.copy(centroids)\n for i, d in enumerate(self.data):\n if self.option == 4:\n membership[i] = np.argmin(np.array([np.abs(d - c).sum() for c in centroids]))\n else:\n membership[i] = np.argmin(np.array([np.sqrt(((d - c) ** 2).sum()) for c in centroids]))\n\n for i in range(centroids.shape[0]):\n centroids[i] = self.data[membership == i].mean(axis=0)\n\n self.centroids = np.copy(centroids)\n self.membership = np.copy(membership)\n\n if self.option == 5:\n self.data = self.temp_data\n self.membership = np.zeros(self.data.shape[0]).astype(int)\n for i, d in enumerate(self.data):\n self.membership[i] = np.argmin(np.array([np.sqrt(((d - c) ** 2).sum()) for c in centroids]))\n\n def predict(self):\n error = 0\n for i, c in enumerate(self.centroids):\n subset = self.data[self.membership == i]\n for i, d in enumerate(subset):\n error += ((d - c) ** 2).sum()\n return error\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
"""asks the user for english words to latinize"""
def latinize_word(word):
"""performs bee latin on a word"""
if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':
word = word[1:] + word[0] + 'uzz'
else:
word += 'buzz'
return word.lower()
def latinize_sentence(sentence):
"""performs bee latin on a sentence"""
words = sentence.split()
latanized_words = [latinize_word(word) for word in words]
return " ".join(latanized_words)
def main():
"""main function"""
english_sentence = input('Enter English sentence: ')
while english_sentence != 'q':
print(f'Bee latin = {latinize_sentence(english_sentence)}')
english_sentence = input('Enter English sentence: ')
print(latinize_word('goodbye'))
main()
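# Worked example (added for clarity; not part of the original script):
# latinize_word('goodbye') -> 'oodbyeguzz' (the leading consonant moves to the
# end and 'uzz' is appended), while a vowel-initial word just gains 'buzz':
# latinize_word('apple') -> 'applebuzz'. Sentences are handled word by word,
# e.g. latinize_sentence('Hello there') -> 'ellohuzz heretuzz'.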
|
normal
|
{
"blob_id": "5810739300067e8f207d09bf971484a278372a9a",
"index": 5246,
"step-1": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\n<mask token>\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\ndef latinize_sentence(sentence):\n \"\"\"performs bee latin on a sentence\"\"\"\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return ' '.join(latanized_words)\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\ndef latinize_sentence(sentence):\n \"\"\"performs bee latin on a sentence\"\"\"\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return ' '.join(latanized_words)\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\nmain()\n",
"step-5": "\"\"\"asks the user for english words to latinize\"\"\"\n\n\ndef latinize_word(word):\n \"\"\"performs bee latin on a word\"\"\"\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()\n\n\ndef latinize_sentence(sentence):\n \"\"\"performs bee latin on a sentence\"\"\"\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return \" \".join(latanized_words)\n\n\ndef main():\n \"\"\"main function\"\"\"\n english_sentence = input('Enter English sentence: ')\n while english_sentence != 'q':\n print(f'Bee latin = {latinize_sentence(english_sentence)}')\n english_sentence = input('Enter English sentence: ')\n print(latinize_word('goodbye'))\n\n\nmain()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# %%
import numpy as np
from numpy import sin, cos, pi
import gym
import seagul.envs
from seagul.integration import rk4,euler
from control import lqr, ctrb
from torch.multiprocessing import Pool
import matplotlib.pyplot as plt
import matplotlib
#matplotlib.use('Qt5Agg')
import time
global_start = time.time()
# %%
m1 = 1
m2 = 1
l1 = 1
l2 = 2
lc1 = .5
lc2 = 1
I1 = .083
I2 = .33
g = 9.8
#
# m1 = 1
# m2 = 1
# l1 = 1
# l2 = 1
# lc1 = .5
# lc2 = .5
# I1 = .2
# I2 = 1.0
# g = 9.8
dt = .01
max_torque = 25
integrator = euler
Q = np.identity(4)
Q[0, 0] = 1
Q[1, 1] = 1
Q[2, 2] = 1
Q[3, 3] = 1
#
# Q = np.array([[1000, -500, 0,0],[-500, 1000, 0, 0],[0, 0, 1000, -500],[0,0,-500,1000]])
R = np.identity(2) * .01
eval_max_t = 10
th1 = pi / 2
th2 = 0
th1d = 0
th2d = 0
TAU = np.array([[0], [1]])
m11 = m1 * lc1 ** 2 + m2 * (l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * cos(th2)) + I1 + I2
m22 = m2 * lc2 ** 2 + I2
m12 = m2 * (lc2 ** 2 + l1 * lc2 * cos(th2)) + I2
M = np.array([[m11, m12], [m12, m22]])
h1 = -m2 * l1 * lc2 * sin(th2) * th2d ** 2 - 2 * m2 * l1 * lc2 * sin(th2) * th2d * th1d
h2 = m2 * l1 * lc2 * sin(th2) * th1d ** 2
H = np.array([[h1], [h2]])
phi1 = (m1 * lc1 + m2 * l1) * g * cos(th1) + m2 * lc2 * g * cos(th1 + th2)
phi2 = m2 * lc2 * g * cos(th1 + th2)
PHI = np.array([[phi1], [phi2]])
Bl = np.linalg.inv(M) @ TAU
Blin = np.array([[0, 0], [0, 0], [0, Bl[0].item()], [0, Bl[1].item()]])
DPHI = np.array([[-g * (m1 * lc1 + m2 * l1 + m2 * lc2), -m2 * lc2 * g], [-m2 * lc2 * g, -m2 * lc2 * g]])
Al = -np.linalg.inv(M) @ DPHI
Alin = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [Al[0, 0], Al[0, 1], 0, 0], [Al[1, 0], Al[1, 1], 0, 0]])
Ctr = ctrb(Alin, Blin)
assert np.linalg.matrix_rank(Ctr) == 4
K, S, E = lqr(Alin, Blin, Q, R)
k = np.array(K[1, :])
print(k)
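# Added sanity check (not in the original script): LQR should place every
# closed-loop eigenvalue of Alin - Blin @ K in the left half plane. E returned
# by lqr() already holds these eigenvalues, so recomputing them here is just a
# cross-check before k is used in the controller below.
closed_loop_eigs = np.linalg.eigvals(Alin - Blin @ K)
assert np.all(np.real(closed_loop_eigs) < 0), "LQR closed loop is not stable"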
def control(q):
gs = np.array([pi / 2, 0, 0, 0])
return -k.dot(q - gs)
def reward_fn(s, a):
reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])
done = reward < 2
return reward, done
def do_rollout(args):
x, trial_num = args
th1, th2, dth1, dth2 = x
np.random.seed(trial_num)
local_reward_hist = np.ones((env.num_steps, 1)) * -1
obs = env.reset(init_vec=[th1, th2, dth1, dth2])
for i in range(env.num_steps):
actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)
obs, reward, done, _ = env.step(actions)
local_reward_hist[i, :] = np.copy(reward)
if done:
break
return local_reward_hist, i
# %%
start = time.time()
config = {"init_state": [0, 0, 0, 0],
"max_torque": max_torque,
"init_state_weights": [0, 0, 0, 0],
"max_t" : 2.5,
"dt": dt,
"m2": m2,
"m1": m1,
"l1": l1,
"lc1": lc1,
"lc2": lc2,
"i1": I1,
"i2": I2,
"integrator" : integrator,
"reward_fn": reward_fn,
"act_hold": 1
}
env = gym.make('su_acrobot-v0', **config)
num_trials = 200000
reward_hist = np.zeros((num_trials, env.num_steps, 1))
X = np.zeros((num_trials, 4), dtype=np.float32)
Y = np.zeros((num_trials, 1), dtype=np.float32)
th1_min = pi / 2 - .5
th1_max = pi / 2 + .5
th2_min = -1
th2_max = 1
th1dot_min = -5
th1dot_max = 5
th2dot_min = -10
th2dot_max = 10
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
pool = Pool() # defaults to the number of available CPUs
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i, :] = samples[i, :]
Y[i] = sum(rews) > env.num_steps*3 - 10
th1_min = 0
th1_max = 2*pi
th2_min = -pi
th2_max = pi
th1dot_min = -10
th1dot_max = 10
th2dot_min = -30
th2dot_max = 30
samples = np.random.random_sample((int(num_trials/2), 4))
samples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])
samples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])
total_steps = 0
for i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2), int(num_trials))))):
rews, steps = res
reward_hist[i, :, :] = rews
total_steps += steps
X[i+int(num_trials/2), :] = samples[i, :]
Y[i+int(num_trials/2)] = sum(rews) > env.num_steps*3 - 5
print(time.time() - start)
# %%
from seagul.nn import MLP, fit_model
import torch
net = MLP(4, 1, 2, 32) # output_activation=torch.nn.Softmax)
Y0 = np.ones((num_trials, 1), dtype=np.float32)
w = 1e-2
class_weight = torch.tensor(Y.shape[0]/sum(Y)*w, dtype=torch.float32)
loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
#loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss())
# loss_hist = fit_model(net, X, Y, 100, batch_size=2048)
# loss_hist = fit_model(net, X, Y0, 5, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))
plt.close()
plt.plot(loss_hist)
plt.show()
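# Added evaluation sketch (not in the original script): check how well the gate
# network separates the labelled rollouts on its own training data, with 0.5 on
# the sigmoid output as the threshold. This assumes, as the rest of the script
# does, that seagul's MLP accepts float32 numpy input and returns a torch tensor.
with torch.no_grad():
    gate_probs = torch.sigmoid(net(X)).numpy()
gate_labels = (gate_probs > 0.5).astype(np.float32)
print("gate training accuracy:", (gate_labels == Y).mean())
print("positive rate in labels:", Y.mean(), "in predictions:", gate_labels.mean())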
# %%
n_thdot = 1
n_th = 1000
th1_vals = np.linspace(0, 2*pi, n_th)
th2_vals = np.linspace(-pi, pi, n_th)
th1dot_vals = np.linspace(-10, 10, n_th)
th2dot_vals = np.linspace(-30, 30, n_th)
sig = torch.nn.Sigmoid()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
from itertools import product
start = time.time()
for i, j in product(range(n_th), range(n_th)):
coords[j, i, :] = np.array([th1_vals[i], th2_vals[j], 0, 0])
preds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())
end = time.time()
print(end - start)
fig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))
# generate 2 2d grids for the x & y bounds
x, y = np.meshgrid(th1_vals, th2_vals)
z = preds
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = 0, np.abs(z).max()
c = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('Theta')
ax.set_xlabel('Th1')
ax.set_ylabel('Th2')
# set the limits of the plot to the limits of the data
ax.axis([x.min(), x.max(), y.min(), y.max()])
fig.colorbar(c, ax=ax)
plt.show()
coords = np.zeros((n_th, n_th, 4), dtype=np.float32)
start = time.time()
for i, j in product(range(n_th), range(n_th)):
coords[j, i, :] = np.array([pi/2, 0, th1dot_vals[i], th2dot_vals[j]])
preds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())
end = time.time()
print(end - start)
fig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))
# generate 2 2d grids for the x & y bounds
x, y = np.meshgrid(th1dot_vals, th2dot_vals)
z = preds
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = z[:-1, :-1]
z_min, z_max = 0, np.abs(z).max()
c = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('DTheta')
ax.set_xlabel('dth1')
ax.set_ylabel('dth2')
# set the limits of the plot to the limits of the data
ax.axis([x.min(), x.max(), y.min(), y.max()])
fig.colorbar(c, ax=ax)
plt.show()
# %%
torch.set_default_dtype(torch.float32)
def reward_fn(s, a):
reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])
return reward, False
def do_rollout(trial_num):
np.random.seed(trial_num)
act_hold = 20
hold_count = 0
obs = env.reset()
local_lqr = False
actions = np.random.randn(1) * 3
local_state_hist = np.zeros((env.num_steps, env.observation_space.shape[0]))
local_reward_hist = np.zeros((env.num_steps, 1))
local_gate_hist = np.zeros((env.num_steps, 1))
local_action_hist = np.zeros((env.num_steps, 1))
for i in range(env.num_steps):
obs = np.array(obs, dtype=np.float32)
if sig(net(obs)) > .85:
actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)
local_lqr = True
local_gate_hist[i] = 1
else:
local_gate_hist[i] = 0
if hold_count == act_hold:
actions = np.random.randn(1) * 3
hold_count = 0
hold_count += 1
obs, reward, done, _ = env.step(actions)
local_action_hist[i, :] = np.copy(actions)
local_state_hist[i, :] = np.copy(obs)
local_reward_hist[i, :] = np.copy(reward)
return local_action_hist, local_state_hist, local_reward_hist, local_gate_hist, local_lqr
config = {"init_state": [-pi / 2, 0, 0, 0],
"max_torque": max_torque,
"init_state_weights": [1, 1, 5, 5],
"dt": dt,
"m2": m2,
"m1": m1,
"l1": l1,
"lc1": lc1,
"lc2": lc2,
"i1": I1,
"i2": I2,
"integrator" : integrator,
"reward_fn": reward_fn,
"act_hold": 1,
"max_t" : 10
}
env = gym.make('su_acrobot-v0', **config)
num_trials = 1000
action_hist = np.zeros((num_trials, env.num_steps, 1))
state_hist = np.zeros((num_trials, env.num_steps, env.observation_space.shape[0]))
reward_hist = np.zeros((num_trials, env.num_steps, 1))
gate_hist = np.zeros((num_trials, env.num_steps, 1))
err_hist = np.zeros((num_trials, 1))
lqr_list = []
success_list = []
act_hold = 20
hold_count = 0
obs = env.reset()
start = time.time()
pool = Pool() # defaults to the number of available CPUs
for i, res in enumerate(pool.imap(do_rollout,range(num_trials))):
acts, obs, rews, gate, lqr_on = res
action_hist[i, :, :] = acts
state_hist[i, :, :] = obs
reward_hist[i, :, :] = rews
gate_hist[i, :, :] = gate
err_hist[i] = (np.sqrt(sum(((state_hist[i, -1, :] - np.array([pi / 2, 0, 0, 0])) ** 2))))
if lqr_on:
lqr_list.append(i)
#print(err_hist[i])
#print(reward_hist[i,-1])
if err_hist[i] < 2:
success_list.append(i)
#
# for i in (range(num_trials)):
# res = do_rollout(i)
# acts, obs, rews, gate, lqr_on = res
# action_hist[i, :, :] = acts
# state_hist[i, :, :] = obs
# reward_hist[i, :, :] = rews
# gate_hist[i, :, :] = gate
# err_hist[i] = (np.sqrt(sum(((state_hist[i, -1, :] - np.array([pi / 2, 0, 0, 0])) ** 2))))
# if lqr_on:
# lqr_list.append(i)
# #print(err_hist[i])
# #print(reward_hist[i,-1])
# if err_hist[i] < 2:
# success_list.append(i)
print(len(lqr_list))
print(len(success_list))
print((time.time() - global_start) / 60)
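# Added summary sketch (not in the original script): beyond the raw counts,
# report how often the LQR gate engaged and how close the final states came to
# the upright goal, which is what success_list thresholds on above.
print("fraction of rollouts that ever engaged LQR:", len(lqr_list) / num_trials)
print("fraction counted as successes (final-state error < 2):", len(success_list) / num_trials)
print("mean gate activation per step:", gate_hist.mean())
print("median final-state error:", np.median(err_hist))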
|
normal
|
{
"blob_id": "358d4573ff386d6874d5bb5decfe71c71141bf1c",
"index": 2525,
"step-1": "<mask token>\n\n\ndef control(q):\n gs = np.array([pi / 2, 0, 0, 0])\n return -k.dot(q - gs)\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n done = reward < 2\n return reward, done\n\n\ndef do_rollout(args):\n x, trial_num = args\n th1, th2, dth1, dth2 = x\n np.random.seed(trial_num)\n local_reward_hist = np.ones((env.num_steps, 1)) * -1\n obs = env.reset(init_vec=[th1, th2, dth1, dth2])\n for i in range(env.num_steps):\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)\n obs, reward, done, _ = env.step(actions)\n local_reward_hist[i, :] = np.copy(reward)\n if done:\n break\n return local_reward_hist, i\n\n\n<mask token>\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n return reward, False\n\n\ndef do_rollout(trial_num):\n np.random.seed(trial_num)\n act_hold = 20\n hold_count = 0\n obs = env.reset()\n local_lqr = False\n actions = np.random.randn(1) * 3\n local_state_hist = np.zeros((env.num_steps, env.observation_space.shape[0])\n )\n local_reward_hist = np.zeros((env.num_steps, 1))\n local_gate_hist = np.zeros((env.num_steps, 1))\n local_action_hist = np.zeros((env.num_steps, 1))\n for i in range(env.num_steps):\n obs = np.array(obs, dtype=np.float32)\n if sig(net(obs)) > 0.85:\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque\n )\n local_lqr = True\n local_gate_hist[i] = 1\n else:\n local_gate_hist[i] = 0\n if hold_count == act_hold:\n actions = np.random.randn(1) * 3\n hold_count = 0\n hold_count += 1\n obs, reward, done, _ = env.step(actions)\n local_action_hist[i, :] = np.copy(actions)\n local_state_hist[i, :] = np.copy(obs)\n local_reward_hist[i, :] = np.copy(reward)\n return (local_action_hist, local_state_hist, local_reward_hist,\n local_gate_hist, local_lqr)\n\n\n<mask token>\n",
"step-2": "<mask token>\nassert np.linalg.matrix_rank(Ctr) == 4\n<mask token>\nprint(k)\n\n\ndef control(q):\n gs = np.array([pi / 2, 0, 0, 0])\n return -k.dot(q - gs)\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n done = reward < 2\n return reward, done\n\n\ndef do_rollout(args):\n x, trial_num = args\n th1, th2, dth1, dth2 = x\n np.random.seed(trial_num)\n local_reward_hist = np.ones((env.num_steps, 1)) * -1\n obs = env.reset(init_vec=[th1, th2, dth1, dth2])\n for i in range(env.num_steps):\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)\n obs, reward, done, _ = env.step(actions)\n local_reward_hist[i, :] = np.copy(reward)\n if done:\n break\n return local_reward_hist, i\n\n\n<mask token>\nsamples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min -\n th1dot_max, th2dot_min - th2dot_max])\nsamples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])\n<mask token>\nfor i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(\n num_trials / 2))))):\n rews, steps = res\n reward_hist[i, :, :] = rews\n total_steps += steps\n X[i, :] = samples[i, :]\n Y[i] = sum(rews) > env.num_steps * 3 - 10\n<mask token>\nsamples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min -\n th1dot_max, th2dot_min - th2dot_max])\nsamples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])\n<mask token>\nfor i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(\n num_trials / 2), int(num_trials))))):\n rews, steps = res\n reward_hist[i, :, :] = rews\n total_steps += steps\n X[i + int(num_trials / 2), :] = samples[i, :]\n Y[i + int(num_trials / 2)] = sum(rews) > env.num_steps * 3 - 5\nprint(time.time() - start)\n<mask token>\nplt.close()\nplt.plot(loss_hist)\nplt.show()\n<mask token>\nfor i, j in product(range(n_th), range(n_th)):\n coords[j, i, :] = np.array([th1_vals[i], th2_vals[j], 0, 0])\n<mask token>\nprint(end - start)\n<mask token>\nax.set_title('Theta')\nax.set_xlabel('Th1')\nax.set_ylabel('Th2')\nax.axis([x.min(), x.max(), y.min(), y.max()])\nfig.colorbar(c, ax=ax)\nplt.show()\n<mask token>\nfor i, j in product(range(n_th), range(n_th)):\n coords[j, i, :] = np.array([pi / 2, 0, th1dot_vals[i], th2dot_vals[j]])\n<mask token>\nprint(end - start)\n<mask token>\nax.set_title('DTheta')\nax.set_xlabel('dth1')\nax.set_ylabel('dth2')\nax.axis([x.min(), x.max(), y.min(), y.max()])\nfig.colorbar(c, ax=ax)\nplt.show()\ntorch.set_default_dtype(torch.float32)\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n return reward, False\n\n\ndef do_rollout(trial_num):\n np.random.seed(trial_num)\n act_hold = 20\n hold_count = 0\n obs = env.reset()\n local_lqr = False\n actions = np.random.randn(1) * 3\n local_state_hist = np.zeros((env.num_steps, env.observation_space.shape[0])\n )\n local_reward_hist = np.zeros((env.num_steps, 1))\n local_gate_hist = np.zeros((env.num_steps, 1))\n local_action_hist = np.zeros((env.num_steps, 1))\n for i in range(env.num_steps):\n obs = np.array(obs, dtype=np.float32)\n if sig(net(obs)) > 0.85:\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque\n )\n local_lqr = True\n local_gate_hist[i] = 1\n else:\n local_gate_hist[i] = 0\n if hold_count == act_hold:\n actions = np.random.randn(1) * 3\n hold_count = 0\n hold_count += 1\n obs, reward, done, _ = env.step(actions)\n local_action_hist[i, :] = np.copy(actions)\n local_state_hist[i, :] = np.copy(obs)\n local_reward_hist[i, :] = np.copy(reward)\n return (local_action_hist, local_state_hist, 
local_reward_hist,\n local_gate_hist, local_lqr)\n\n\n<mask token>\nfor i, res in enumerate(pool.imap(do_rollout, range(num_trials))):\n acts, obs, rews, gate, lqr_on = res\n action_hist[i, :, :] = acts\n state_hist[i, :, :] = obs\n reward_hist[i, :, :] = rews\n gate_hist[i, :, :] = gate\n err_hist[i] = np.sqrt(sum((state_hist[i, -1, :] - np.array([pi / 2, 0, \n 0, 0])) ** 2))\n if lqr_on:\n lqr_list.append(i)\n if err_hist[i] < 2:\n success_list.append(i)\nprint(len(lqr_list))\nprint(len(success_list))\nprint((time.time() - global_start) / 60)\n",
"step-3": "<mask token>\nglobal_start = time.time()\nm1 = 1\nm2 = 1\nl1 = 1\nl2 = 2\nlc1 = 0.5\nlc2 = 1\nI1 = 0.083\nI2 = 0.33\ng = 9.8\ndt = 0.01\nmax_torque = 25\nintegrator = euler\nQ = np.identity(4)\nQ[0, 0] = 1\nQ[1, 1] = 1\nQ[2, 2] = 1\nQ[3, 3] = 1\nR = np.identity(2) * 0.01\neval_max_t = 10\nth1 = pi / 2\nth2 = 0\nth1d = 0\nth2d = 0\nTAU = np.array([[0], [1]])\nm11 = m1 * lc1 ** 2 + m2 * (l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * cos(th2)\n ) + I1 + I2\nm22 = m2 * lc2 ** 2 + I2\nm12 = m2 * (lc2 ** 2 + l1 * lc2 * cos(th2)) + I2\nM = np.array([[m11, m12], [m12, m22]])\nh1 = -m2 * l1 * lc2 * sin(th2) * th2d ** 2 - 2 * m2 * l1 * lc2 * sin(th2\n ) * th2d * th1d\nh2 = m2 * l1 * lc2 * sin(th2) * th1d ** 2\nH = np.array([[h1], [h2]])\nphi1 = (m1 * lc1 + m2 * l1) * g * cos(th1) + m2 * lc2 * g * cos(th1 + th2)\nphi2 = m2 * lc2 * g * cos(th1 + th2)\nPHI = np.array([[phi1], [phi2]])\nBl = np.linalg.inv(M) @ TAU\nBlin = np.array([[0, 0], [0, 0], [0, Bl[0].item()], [0, Bl[1].item()]])\nDPHI = np.array([[-g * (m1 * lc1 + m2 * l1 + m2 * lc2), -m2 * lc2 * g], [-\n m2 * lc2 * g, -m2 * lc2 * g]])\nAl = -np.linalg.inv(M) @ DPHI\nAlin = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [Al[0, 0], Al[0, 1], 0, 0], [\n Al[1, 0], Al[1, 1], 0, 0]])\nCtr = ctrb(Alin, Blin)\nassert np.linalg.matrix_rank(Ctr) == 4\nK, S, E = lqr(Alin, Blin, Q, R)\nk = np.array(K[1, :])\nprint(k)\n\n\ndef control(q):\n gs = np.array([pi / 2, 0, 0, 0])\n return -k.dot(q - gs)\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n done = reward < 2\n return reward, done\n\n\ndef do_rollout(args):\n x, trial_num = args\n th1, th2, dth1, dth2 = x\n np.random.seed(trial_num)\n local_reward_hist = np.ones((env.num_steps, 1)) * -1\n obs = env.reset(init_vec=[th1, th2, dth1, dth2])\n for i in range(env.num_steps):\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)\n obs, reward, done, _ = env.step(actions)\n local_reward_hist[i, :] = np.copy(reward)\n if done:\n break\n return local_reward_hist, i\n\n\nstart = time.time()\nconfig = {'init_state': [0, 0, 0, 0], 'max_torque': max_torque,\n 'init_state_weights': [0, 0, 0, 0], 'max_t': 2.5, 'dt': dt, 'm2': m2,\n 'm1': m1, 'l1': l1, 'lc1': lc1, 'lc2': lc2, 'i1': I1, 'i2': I2,\n 'integrator': integrator, 'reward_fn': reward_fn, 'act_hold': 1}\nenv = gym.make('su_acrobot-v0', **config)\nnum_trials = 200000\nreward_hist = np.zeros((num_trials, env.num_steps, 1))\nX = np.zeros((num_trials, 4), dtype=np.float32)\nY = np.zeros((num_trials, 1), dtype=np.float32)\nth1_min = pi / 2 - 0.5\nth1_max = pi / 2 + 0.5\nth2_min = -1\nth2_max = 1\nth1dot_min = -5\nth1dot_max = 5\nth2dot_min = -10\nth2dot_max = 10\nsamples = np.random.random_sample((int(num_trials / 2), 4))\nsamples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min -\n th1dot_max, th2dot_min - th2dot_max])\nsamples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])\ntotal_steps = 0\npool = Pool()\nfor i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(\n num_trials / 2))))):\n rews, steps = res\n reward_hist[i, :, :] = rews\n total_steps += steps\n X[i, :] = samples[i, :]\n Y[i] = sum(rews) > env.num_steps * 3 - 10\nth1_min = 0\nth1_max = 2 * pi\nth2_min = -pi\nth2_max = pi\nth1dot_min = -10\nth1dot_max = 10\nth2dot_min = -30\nth2dot_max = 30\nsamples = np.random.random_sample((int(num_trials / 2), 4))\nsamples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min -\n th1dot_max, th2dot_min - th2dot_max])\nsamples += np.array([th1_max, th2_max, th1dot_max, 
th2dot_max])\ntotal_steps = 0\nfor i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(\n num_trials / 2), int(num_trials))))):\n rews, steps = res\n reward_hist[i, :, :] = rews\n total_steps += steps\n X[i + int(num_trials / 2), :] = samples[i, :]\n Y[i + int(num_trials / 2)] = sum(rews) > env.num_steps * 3 - 5\nprint(time.time() - start)\n<mask token>\nnet = MLP(4, 1, 2, 32)\nY0 = np.ones((num_trials, 1), dtype=np.float32)\nw = 0.01\nclass_weight = torch.tensor(Y.shape[0] / sum(Y) * w, dtype=torch.float32)\nloss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.\n BCEWithLogitsLoss(pos_weight=class_weight))\nplt.close()\nplt.plot(loss_hist)\nplt.show()\nn_thdot = 1\nn_th = 1000\nth1_vals = np.linspace(0, 2 * pi, n_th)\nth2_vals = np.linspace(-pi, pi, n_th)\nth1dot_vals = np.linspace(-10, 10, n_th)\nth2dot_vals = np.linspace(-30, 30, n_th)\nsig = torch.nn.Sigmoid()\ncoords = np.zeros((n_th, n_th, 4), dtype=np.float32)\n<mask token>\nstart = time.time()\nfor i, j in product(range(n_th), range(n_th)):\n coords[j, i, :] = np.array([th1_vals[i], th2_vals[j], 0, 0])\npreds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())\nend = time.time()\nprint(end - start)\nfig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))\nx, y = np.meshgrid(th1_vals, th2_vals)\nz = preds\nz = z[:-1, :-1]\nz_min, z_max = 0, np.abs(z).max()\nc = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)\nax.set_title('Theta')\nax.set_xlabel('Th1')\nax.set_ylabel('Th2')\nax.axis([x.min(), x.max(), y.min(), y.max()])\nfig.colorbar(c, ax=ax)\nplt.show()\ncoords = np.zeros((n_th, n_th, 4), dtype=np.float32)\nstart = time.time()\nfor i, j in product(range(n_th), range(n_th)):\n coords[j, i, :] = np.array([pi / 2, 0, th1dot_vals[i], th2dot_vals[j]])\npreds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())\nend = time.time()\nprint(end - start)\nfig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))\nx, y = np.meshgrid(th1dot_vals, th2dot_vals)\nz = preds\nz = z[:-1, :-1]\nz_min, z_max = 0, np.abs(z).max()\nc = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)\nax.set_title('DTheta')\nax.set_xlabel('dth1')\nax.set_ylabel('dth2')\nax.axis([x.min(), x.max(), y.min(), y.max()])\nfig.colorbar(c, ax=ax)\nplt.show()\ntorch.set_default_dtype(torch.float32)\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n return reward, False\n\n\ndef do_rollout(trial_num):\n np.random.seed(trial_num)\n act_hold = 20\n hold_count = 0\n obs = env.reset()\n local_lqr = False\n actions = np.random.randn(1) * 3\n local_state_hist = np.zeros((env.num_steps, env.observation_space.shape[0])\n )\n local_reward_hist = np.zeros((env.num_steps, 1))\n local_gate_hist = np.zeros((env.num_steps, 1))\n local_action_hist = np.zeros((env.num_steps, 1))\n for i in range(env.num_steps):\n obs = np.array(obs, dtype=np.float32)\n if sig(net(obs)) > 0.85:\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque\n )\n local_lqr = True\n local_gate_hist[i] = 1\n else:\n local_gate_hist[i] = 0\n if hold_count == act_hold:\n actions = np.random.randn(1) * 3\n hold_count = 0\n hold_count += 1\n obs, reward, done, _ = env.step(actions)\n local_action_hist[i, :] = np.copy(actions)\n local_state_hist[i, :] = np.copy(obs)\n local_reward_hist[i, :] = np.copy(reward)\n return (local_action_hist, local_state_hist, local_reward_hist,\n local_gate_hist, local_lqr)\n\n\nconfig = {'init_state': [-pi / 2, 0, 0, 0], 'max_torque': max_torque,\n 'init_state_weights': [1, 1, 5, 5], 
'dt': dt, 'm2': m2, 'm1': m1, 'l1':\n l1, 'lc1': lc1, 'lc2': lc2, 'i1': I1, 'i2': I2, 'integrator':\n integrator, 'reward_fn': reward_fn, 'act_hold': 1, 'max_t': 10}\nenv = gym.make('su_acrobot-v0', **config)\nnum_trials = 1000\naction_hist = np.zeros((num_trials, env.num_steps, 1))\nstate_hist = np.zeros((num_trials, env.num_steps, env.observation_space.\n shape[0]))\nreward_hist = np.zeros((num_trials, env.num_steps, 1))\ngate_hist = np.zeros((num_trials, env.num_steps, 1))\nerr_hist = np.zeros((num_trials, 1))\nlqr_list = []\nsuccess_list = []\nact_hold = 20\nhold_count = 0\nobs = env.reset()\nstart = time.time()\npool = Pool()\nfor i, res in enumerate(pool.imap(do_rollout, range(num_trials))):\n acts, obs, rews, gate, lqr_on = res\n action_hist[i, :, :] = acts\n state_hist[i, :, :] = obs\n reward_hist[i, :, :] = rews\n gate_hist[i, :, :] = gate\n err_hist[i] = np.sqrt(sum((state_hist[i, -1, :] - np.array([pi / 2, 0, \n 0, 0])) ** 2))\n if lqr_on:\n lqr_list.append(i)\n if err_hist[i] < 2:\n success_list.append(i)\nprint(len(lqr_list))\nprint(len(success_list))\nprint((time.time() - global_start) / 60)\n",
"step-4": "import numpy as np\nfrom numpy import sin, cos, pi\nimport gym\nimport seagul.envs\nfrom seagul.integration import rk4, euler\nfrom control import lqr, ctrb\nfrom torch.multiprocessing import Pool\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport time\nglobal_start = time.time()\nm1 = 1\nm2 = 1\nl1 = 1\nl2 = 2\nlc1 = 0.5\nlc2 = 1\nI1 = 0.083\nI2 = 0.33\ng = 9.8\ndt = 0.01\nmax_torque = 25\nintegrator = euler\nQ = np.identity(4)\nQ[0, 0] = 1\nQ[1, 1] = 1\nQ[2, 2] = 1\nQ[3, 3] = 1\nR = np.identity(2) * 0.01\neval_max_t = 10\nth1 = pi / 2\nth2 = 0\nth1d = 0\nth2d = 0\nTAU = np.array([[0], [1]])\nm11 = m1 * lc1 ** 2 + m2 * (l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * cos(th2)\n ) + I1 + I2\nm22 = m2 * lc2 ** 2 + I2\nm12 = m2 * (lc2 ** 2 + l1 * lc2 * cos(th2)) + I2\nM = np.array([[m11, m12], [m12, m22]])\nh1 = -m2 * l1 * lc2 * sin(th2) * th2d ** 2 - 2 * m2 * l1 * lc2 * sin(th2\n ) * th2d * th1d\nh2 = m2 * l1 * lc2 * sin(th2) * th1d ** 2\nH = np.array([[h1], [h2]])\nphi1 = (m1 * lc1 + m2 * l1) * g * cos(th1) + m2 * lc2 * g * cos(th1 + th2)\nphi2 = m2 * lc2 * g * cos(th1 + th2)\nPHI = np.array([[phi1], [phi2]])\nBl = np.linalg.inv(M) @ TAU\nBlin = np.array([[0, 0], [0, 0], [0, Bl[0].item()], [0, Bl[1].item()]])\nDPHI = np.array([[-g * (m1 * lc1 + m2 * l1 + m2 * lc2), -m2 * lc2 * g], [-\n m2 * lc2 * g, -m2 * lc2 * g]])\nAl = -np.linalg.inv(M) @ DPHI\nAlin = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [Al[0, 0], Al[0, 1], 0, 0], [\n Al[1, 0], Al[1, 1], 0, 0]])\nCtr = ctrb(Alin, Blin)\nassert np.linalg.matrix_rank(Ctr) == 4\nK, S, E = lqr(Alin, Blin, Q, R)\nk = np.array(K[1, :])\nprint(k)\n\n\ndef control(q):\n gs = np.array([pi / 2, 0, 0, 0])\n return -k.dot(q - gs)\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n done = reward < 2\n return reward, done\n\n\ndef do_rollout(args):\n x, trial_num = args\n th1, th2, dth1, dth2 = x\n np.random.seed(trial_num)\n local_reward_hist = np.ones((env.num_steps, 1)) * -1\n obs = env.reset(init_vec=[th1, th2, dth1, dth2])\n for i in range(env.num_steps):\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)\n obs, reward, done, _ = env.step(actions)\n local_reward_hist[i, :] = np.copy(reward)\n if done:\n break\n return local_reward_hist, i\n\n\nstart = time.time()\nconfig = {'init_state': [0, 0, 0, 0], 'max_torque': max_torque,\n 'init_state_weights': [0, 0, 0, 0], 'max_t': 2.5, 'dt': dt, 'm2': m2,\n 'm1': m1, 'l1': l1, 'lc1': lc1, 'lc2': lc2, 'i1': I1, 'i2': I2,\n 'integrator': integrator, 'reward_fn': reward_fn, 'act_hold': 1}\nenv = gym.make('su_acrobot-v0', **config)\nnum_trials = 200000\nreward_hist = np.zeros((num_trials, env.num_steps, 1))\nX = np.zeros((num_trials, 4), dtype=np.float32)\nY = np.zeros((num_trials, 1), dtype=np.float32)\nth1_min = pi / 2 - 0.5\nth1_max = pi / 2 + 0.5\nth2_min = -1\nth2_max = 1\nth1dot_min = -5\nth1dot_max = 5\nth2dot_min = -10\nth2dot_max = 10\nsamples = np.random.random_sample((int(num_trials / 2), 4))\nsamples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min -\n th1dot_max, th2dot_min - th2dot_max])\nsamples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])\ntotal_steps = 0\npool = Pool()\nfor i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(\n num_trials / 2))))):\n rews, steps = res\n reward_hist[i, :, :] = rews\n total_steps += steps\n X[i, :] = samples[i, :]\n Y[i] = sum(rews) > env.num_steps * 3 - 10\nth1_min = 0\nth1_max = 2 * pi\nth2_min = -pi\nth2_max = pi\nth1dot_min = -10\nth1dot_max = 10\nth2dot_min = -30\nth2dot_max = 
30\nsamples = np.random.random_sample((int(num_trials / 2), 4))\nsamples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min -\n th1dot_max, th2dot_min - th2dot_max])\nsamples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])\ntotal_steps = 0\nfor i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(\n num_trials / 2), int(num_trials))))):\n rews, steps = res\n reward_hist[i, :, :] = rews\n total_steps += steps\n X[i + int(num_trials / 2), :] = samples[i, :]\n Y[i + int(num_trials / 2)] = sum(rews) > env.num_steps * 3 - 5\nprint(time.time() - start)\nfrom seagul.nn import MLP, fit_model\nimport torch\nnet = MLP(4, 1, 2, 32)\nY0 = np.ones((num_trials, 1), dtype=np.float32)\nw = 0.01\nclass_weight = torch.tensor(Y.shape[0] / sum(Y) * w, dtype=torch.float32)\nloss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.\n BCEWithLogitsLoss(pos_weight=class_weight))\nplt.close()\nplt.plot(loss_hist)\nplt.show()\nn_thdot = 1\nn_th = 1000\nth1_vals = np.linspace(0, 2 * pi, n_th)\nth2_vals = np.linspace(-pi, pi, n_th)\nth1dot_vals = np.linspace(-10, 10, n_th)\nth2dot_vals = np.linspace(-30, 30, n_th)\nsig = torch.nn.Sigmoid()\ncoords = np.zeros((n_th, n_th, 4), dtype=np.float32)\nfrom itertools import product\nstart = time.time()\nfor i, j in product(range(n_th), range(n_th)):\n coords[j, i, :] = np.array([th1_vals[i], th2_vals[j], 0, 0])\npreds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())\nend = time.time()\nprint(end - start)\nfig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))\nx, y = np.meshgrid(th1_vals, th2_vals)\nz = preds\nz = z[:-1, :-1]\nz_min, z_max = 0, np.abs(z).max()\nc = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)\nax.set_title('Theta')\nax.set_xlabel('Th1')\nax.set_ylabel('Th2')\nax.axis([x.min(), x.max(), y.min(), y.max()])\nfig.colorbar(c, ax=ax)\nplt.show()\ncoords = np.zeros((n_th, n_th, 4), dtype=np.float32)\nstart = time.time()\nfor i, j in product(range(n_th), range(n_th)):\n coords[j, i, :] = np.array([pi / 2, 0, th1dot_vals[i], th2dot_vals[j]])\npreds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())\nend = time.time()\nprint(end - start)\nfig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))\nx, y = np.meshgrid(th1dot_vals, th2dot_vals)\nz = preds\nz = z[:-1, :-1]\nz_min, z_max = 0, np.abs(z).max()\nc = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)\nax.set_title('DTheta')\nax.set_xlabel('dth1')\nax.set_ylabel('dth2')\nax.axis([x.min(), x.max(), y.min(), y.max()])\nfig.colorbar(c, ax=ax)\nplt.show()\ntorch.set_default_dtype(torch.float32)\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n return reward, False\n\n\ndef do_rollout(trial_num):\n np.random.seed(trial_num)\n act_hold = 20\n hold_count = 0\n obs = env.reset()\n local_lqr = False\n actions = np.random.randn(1) * 3\n local_state_hist = np.zeros((env.num_steps, env.observation_space.shape[0])\n )\n local_reward_hist = np.zeros((env.num_steps, 1))\n local_gate_hist = np.zeros((env.num_steps, 1))\n local_action_hist = np.zeros((env.num_steps, 1))\n for i in range(env.num_steps):\n obs = np.array(obs, dtype=np.float32)\n if sig(net(obs)) > 0.85:\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque\n )\n local_lqr = True\n local_gate_hist[i] = 1\n else:\n local_gate_hist[i] = 0\n if hold_count == act_hold:\n actions = np.random.randn(1) * 3\n hold_count = 0\n hold_count += 1\n obs, reward, done, _ = env.step(actions)\n local_action_hist[i, :] = np.copy(actions)\n 
local_state_hist[i, :] = np.copy(obs)\n local_reward_hist[i, :] = np.copy(reward)\n return (local_action_hist, local_state_hist, local_reward_hist,\n local_gate_hist, local_lqr)\n\n\nconfig = {'init_state': [-pi / 2, 0, 0, 0], 'max_torque': max_torque,\n 'init_state_weights': [1, 1, 5, 5], 'dt': dt, 'm2': m2, 'm1': m1, 'l1':\n l1, 'lc1': lc1, 'lc2': lc2, 'i1': I1, 'i2': I2, 'integrator':\n integrator, 'reward_fn': reward_fn, 'act_hold': 1, 'max_t': 10}\nenv = gym.make('su_acrobot-v0', **config)\nnum_trials = 1000\naction_hist = np.zeros((num_trials, env.num_steps, 1))\nstate_hist = np.zeros((num_trials, env.num_steps, env.observation_space.\n shape[0]))\nreward_hist = np.zeros((num_trials, env.num_steps, 1))\ngate_hist = np.zeros((num_trials, env.num_steps, 1))\nerr_hist = np.zeros((num_trials, 1))\nlqr_list = []\nsuccess_list = []\nact_hold = 20\nhold_count = 0\nobs = env.reset()\nstart = time.time()\npool = Pool()\nfor i, res in enumerate(pool.imap(do_rollout, range(num_trials))):\n acts, obs, rews, gate, lqr_on = res\n action_hist[i, :, :] = acts\n state_hist[i, :, :] = obs\n reward_hist[i, :, :] = rews\n gate_hist[i, :, :] = gate\n err_hist[i] = np.sqrt(sum((state_hist[i, -1, :] - np.array([pi / 2, 0, \n 0, 0])) ** 2))\n if lqr_on:\n lqr_list.append(i)\n if err_hist[i] < 2:\n success_list.append(i)\nprint(len(lqr_list))\nprint(len(success_list))\nprint((time.time() - global_start) / 60)\n",
"step-5": "# %%\nimport numpy as np\nfrom numpy import sin, cos, pi\nimport gym\nimport seagul.envs\n\nfrom seagul.integration import rk4,euler\nfrom control import lqr, ctrb\nfrom torch.multiprocessing import Pool\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n#matplotlib.use('Qt5Agg')\n\nimport time\n\nglobal_start = time.time()\n\n# %%\nm1 = 1\nm2 = 1\nl1 = 1\nl2 = 2\nlc1 = .5\nlc2 = 1\nI1 = .083\nI2 = .33\ng = 9.8\n\n#\n# m1 = 1\n# m2 = 1\n# l1 = 1\n# l2 = 1\n# lc1 = .5\n# lc2 = .5\n# I1 = .2\n# I2 = 1.0\n# g = 9.8\n\n\ndt = .01\nmax_torque = 25\nintegrator = euler\n\nQ = np.identity(4)\nQ[0, 0] = 1\nQ[1, 1] = 1\nQ[2, 2] = 1\nQ[3, 3] = 1\n#\n# Q = np.array([[1000, -500, 0,0],[-500, 1000, 0, 0],[0, 0, 1000, -500],[0,0,-500,1000]])\nR = np.identity(2) * .01\n\neval_max_t = 10\n\n\nth1 = pi / 2\nth2 = 0\nth1d = 0\nth2d = 0\n\nTAU = np.array([[0], [1]])\n\nm11 = m1 * lc1 ** 2 + m2 * (l1 ** 2 + lc2 ** 2 + 2 * l1 * lc2 * cos(th2)) + I1 + I2\nm22 = m2 * lc2 ** 2 + I2\nm12 = m2 * (lc2 ** 2 + l1 * lc2 * cos(th2)) + I2\nM = np.array([[m11, m12], [m12, m22]])\n\nh1 = -m2 * l1 * lc2 * sin(th2) * th2d ** 2 - 2 * m2 * l1 * lc2 * sin(th2) * th2d * th1d\nh2 = m2 * l1 * lc2 * sin(th2) * th1d ** 2\nH = np.array([[h1], [h2]])\n\nphi1 = (m1 * lc1 + m2 * l1) * g * cos(th1) + m2 * lc2 * g * cos(th1 + th2)\nphi2 = m2 * lc2 * g * cos(th1 + th2)\nPHI = np.array([[phi1], [phi2]])\n\nBl = np.linalg.inv(M) @ TAU\nBlin = np.array([[0, 0], [0, 0], [0, Bl[0].item()], [0, Bl[1].item()]])\n\nDPHI = np.array([[-g * (m1 * lc1 + m2 * l1 + m2 * lc2), -m2 * lc2 * g], [-m2 * lc2 * g, -m2 * lc2 * g]])\nAl = -np.linalg.inv(M) @ DPHI\nAlin = np.array([[0, 0, 1, 0], [0, 0, 0, 1], [Al[0, 0], Al[0, 1], 0, 0], [Al[1, 0], Al[1, 1], 0, 0]])\n\nCtr = ctrb(Alin, Blin)\nassert np.linalg.matrix_rank(Ctr) == 4\n\nK, S, E = lqr(Alin, Blin, Q, R)\nk = np.array(K[1, :])\nprint(k)\n\n\ndef control(q):\n gs = np.array([pi / 2, 0, 0, 0])\n return -k.dot(q - gs)\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n done = reward < 2\n return reward, done\n\n\ndef do_rollout(args):\n x, trial_num = args\n th1, th2, dth1, dth2 = x\n np.random.seed(trial_num)\n local_reward_hist = np.ones((env.num_steps, 1)) * -1\n obs = env.reset(init_vec=[th1, th2, dth1, dth2])\n\n for i in range(env.num_steps):\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)\n obs, reward, done, _ = env.step(actions)\n local_reward_hist[i, :] = np.copy(reward)\n if done:\n break\n\n return local_reward_hist, i\n\n\n# %%b\nstart = time.time()\nconfig = {\"init_state\": [0, 0, 0, 0],\n \"max_torque\": max_torque,\n \"init_state_weights\": [0, 0, 0, 0],\n \"max_t\" : 2.5,\n \"dt\": dt,\n \"m2\": m2,\n \"m1\": m1,\n \"l1\": l1,\n \"lc1\": lc1,\n \"lc2\": lc2,\n \"i1\": I1,\n \"i2\": I2,\n \"integrator\" : integrator,\n \"reward_fn\": reward_fn,\n \"act_hold\": 1\n }\nenv = gym.make('su_acrobot-v0', **config)\n\nnum_trials = 200000\nreward_hist = np.zeros((num_trials, env.num_steps, 1))\n\nX = np.zeros((num_trials, 4), dtype=np.float32)\nY = np.zeros((num_trials, 1), dtype=np.float32)\n\nth1_min = pi / 2 - .5\nth1_max = pi / 2 + .5\nth2_min = -1\nth2_max = 1\nth1dot_min = -5\nth1dot_max = 5\nth2dot_min = -10\nth2dot_max = 10\n\nsamples = np.random.random_sample((int(num_trials/2), 4))\nsamples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])\nsamples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])\n\ntotal_steps = 0\npool = Pool() # defaults to number of available 
CPU's\nfor i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2))))):\n rews, steps = res\n reward_hist[i, :, :] = rews\n total_steps += steps\n X[i, :] = samples[i, :]\n Y[i] = sum(rews) > env.num_steps*3 - 10\n\n\nth1_min = 0\nth1_max = 2*pi\nth2_min = -pi\nth2_max = pi\nth1dot_min = -10\nth1dot_max = 10\nth2dot_min = -30\nth2dot_max = 30\n\nsamples = np.random.random_sample((int(num_trials/2), 4))\nsamples *= np.array([th1_min - th1_max, th2_min - th2_max, th1dot_min - th1dot_max, th2dot_min - th2dot_max])\nsamples += np.array([th1_max, th2_max, th1dot_max, th2dot_max])\ntotal_steps = 0\n\n\nfor i, res in enumerate(pool.imap(do_rollout, zip(samples, range(int(num_trials/2), int(num_trials))))):\n rews, steps = res\n reward_hist[i, :, :] = rews\n total_steps += steps\n X[i+int(num_trials/2), :] = samples[i, :]\n Y[i+int(num_trials/2)] = sum(rews) > env.num_steps*3 - 5\n\n\nprint(time.time() - start)\n\n# %%\nfrom seagul.nn import MLP, fit_model\nimport torch\n\nnet = MLP(4, 1, 2, 32) # output_activation=torch.nn.Softmax)\nY0 = np.ones((num_trials, 1), dtype=np.float32)\n\nw = 1e-2\nclass_weight = torch.tensor(Y.shape[0]/sum(Y)*w, dtype=torch.float32)\n\nloss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))\n#loss_hist = fit_model(net, X, Y, 50, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss())\n\n# loss_hist = fit_model(net, X, Y, 100, batch_size=2048)\n# loss_hist = fit_model(net, X, Y0, 5, batch_size=2048, loss_fn=torch.nn.BCEWithLogitsLoss(pos_weight=class_weight))\n\nplt.close()\nplt.plot(loss_hist)\nplt.show()\n\n# %%\n\n\nn_thdot = 1\nn_th = 1000\n\nth1_vals = np.linspace(0, 2*pi, n_th)\nth2_vals = np.linspace(-pi, pi, n_th)\n\nth1dot_vals = np.linspace(-10, 10, n_th)\nth2dot_vals = np.linspace(-30, 30, n_th)\n\nsig = torch.nn.Sigmoid()\n\ncoords = np.zeros((n_th, n_th, 4), dtype=np.float32)\n\nfrom itertools import product\n\nstart = time.time()\nfor i, j in product(range(n_th), range(n_th)):\n coords[j, i, :] = np.array([th1_vals[i], th2_vals[j], 0, 0])\n\npreds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())\n\nend = time.time()\n\nprint(end - start)\n\nfig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))\n# generate 2 2d grids for the x & y bounds\nx, y = np.meshgrid(th1_vals, th2_vals)\nz = preds\n\n# x and y are bounds, so z should be the value *inside* those bounds.\n# Therefore, remove the last value from the z array.\nz = z[:-1, :-1]\nz_min, z_max = 0, np.abs(z).max()\n\nc = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)\nax.set_title('Theta')\nax.set_xlabel('Th1')\nax.set_ylabel('Th2')\n\n# set the limits of the plot to the limits of the data\nax.axis([x.min(), x.max(), y.min(), y.max()])\nfig.colorbar(c, ax=ax)\nplt.show()\n\ncoords = np.zeros((n_th, n_th, 4), dtype=np.float32)\n\nstart = time.time()\nfor i, j in product(range(n_th), range(n_th)):\n coords[j, i, :] = np.array([pi/2, 0, th1dot_vals[i], th2dot_vals[j]])\n\npreds = sig(net(coords.reshape(-1, 4)).reshape(n_th, n_th).detach())\nend = time.time()\n\nprint(end - start)\n\nfig, ax = plt.subplots(n_thdot, n_thdot, figsize=(8, 8))\n# generate 2 2d grids for the x & y bounds\nx, y = np.meshgrid(th1dot_vals, th2dot_vals)\nz = preds\n\n# x and y are bounds, so z should be the value *inside* those bounds.\n# Therefore, remove the last value from the z array.\nz = z[:-1, :-1]\nz_min, z_max = 0, np.abs(z).max()\n\nc = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, 
vmax=z_max)\nax.set_title('DTheta')\nax.set_xlabel('dth1')\nax.set_ylabel('dth2')\n# set the limits of the plot to the limits of the data\nax.axis([x.min(), x.max(), y.min(), y.max()])\nfig.colorbar(c, ax=ax)\nplt.show()\n\n\n# %%\n\ntorch.set_default_dtype(torch.float32)\n\n\ndef reward_fn(s, a):\n reward = np.sin(s[0]) + 2 * np.sin(s[0] + s[1])\n return reward, False\n\n\ndef do_rollout(trial_num):\n np.random.seed(trial_num)\n act_hold = 20\n hold_count = 0\n\n obs = env.reset()\n local_lqr = False\n\n actions = np.random.randn(1) * 3\n\n local_state_hist = np.zeros((env.num_steps, env.observation_space.shape[0]))\n local_reward_hist = np.zeros((env.num_steps, 1))\n local_gate_hist = np.zeros((env.num_steps, 1))\n local_action_hist = np.zeros((env.num_steps, 1))\n\n for i in range(env.num_steps):\n obs = np.array(obs, dtype=np.float32)\n if sig(net(obs)) > .85:\n actions = np.clip(np.asarray(control(obs)), -max_torque, max_torque)\n local_lqr = True\n local_gate_hist[i] = 1\n else:\n local_gate_hist[i] = 0\n if hold_count == act_hold:\n actions = np.random.randn(1) * 3\n hold_count = 0\n\n hold_count += 1\n obs, reward, done, _ = env.step(actions)\n local_action_hist[i, :] = np.copy(actions)\n local_state_hist[i, :] = np.copy(obs)\n local_reward_hist[i, :] = np.copy(reward)\n\n return local_action_hist, local_state_hist, local_reward_hist, local_gate_hist, local_lqr\n\n\nconfig = {\"init_state\": [-pi / 2, 0, 0, 0],\n \"max_torque\": max_torque,\n \"init_state_weights\": [1, 1, 5, 5],\n \"dt\": dt,\n \"m2\": m2,\n \"m1\": m1,\n \"l1\": l1,\n \"lc1\": lc1,\n \"lc2\": lc2,\n \"i1\": I1,\n \"i2\": I2,\n \"integrator\" : integrator,\n \"reward_fn\": reward_fn,\n \"act_hold\": 1,\n \"max_t\" : 10\n }\n\nenv = gym.make('su_acrobot-v0', **config)\n\nnum_trials = 1000\naction_hist = np.zeros((num_trials, env.num_steps, 1))\nstate_hist = np.zeros((num_trials, env.num_steps, env.observation_space.shape[0]))\nreward_hist = np.zeros((num_trials, env.num_steps, 1))\ngate_hist = np.zeros((num_trials, env.num_steps, 1))\nerr_hist = np.zeros((num_trials, 1))\n\nlqr_list = []\nsuccess_list = []\n\nact_hold = 20\nhold_count = 0\nobs = env.reset()\n\nstart = time.time()\n\npool = Pool() # defaults to number of available CPU's\nfor i, res in enumerate(pool.imap(do_rollout,range(num_trials))):\n acts, obs, rews, gate, lqr_on = res\n action_hist[i, :, :] = acts\n state_hist[i, :, :] = obs\n reward_hist[i, :, :] = rews\n gate_hist[i, :, :] = gate\n err_hist[i] = (np.sqrt(sum(((state_hist[i, -1, :] - np.array([pi / 2, 0, 0, 0])) ** 2))))\n if lqr_on:\n lqr_list.append(i)\n #print(err_hist[i])\n #print(reward_hist[i,-1])\n if err_hist[i] < 2:\n success_list.append(i)\n\n#\n# for i in (range(num_trials)):\n# res = do_rollout(i)\n# acts, obs, rews, gate, lqr_on = res\n# action_hist[i, :, :] = acts\n# state_hist[i, :, :] = obs\n# reward_hist[i, :, :] = rews\n# gate_hist[i, :, :] = gate\n# err_hist[i] = (np.sqrt(sum(((state_hist[i, -1, :] - np.array([pi / 2, 0, 0, 0])) ** 2))))\n# if lqr_on:\n# lqr_list.append(i)\n# #print(err_hist[i])\n# #print(reward_hist[i,-1])\n# if err_hist[i] < 2:\n# success_list.append(i)\n\n\nprint(len(lqr_list))\nprint(len(success_list))\n\nprint((time.time() - global_start) / 60)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-27 08:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('talk', '0023_auto_20180207_1121'),
]
operations = [
migrations.AddField(
model_name='talkmedia',
name='codelink',
field=models.CharField(blank=True, max_length=255, verbose_name='Source code'),
),
]
|
normal
|
{
"blob_id": "f85a703b47d981397ed6048e941030a3fbee7b6d",
"index": 229,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('talk', '0023_auto_20180207_1121')]\n operations = [migrations.AddField(model_name='talkmedia', name=\n 'codelink', field=models.CharField(blank=True, max_length=255,\n verbose_name='Source code'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('talk', '0023_auto_20180207_1121')]\n operations = [migrations.AddField(model_name='talkmedia', name=\n 'codelink', field=models.CharField(blank=True, max_length=255,\n verbose_name='Source code'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.8 on 2018-04-27 08:05\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('talk', '0023_auto_20180207_1121'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='talkmedia',\n name='codelink',\n field=models.CharField(blank=True, max_length=255, verbose_name='Source code'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 2.1.7 on 2019-03-24 07:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('adminsite', '0005_auto_20190324_0706'),
]
operations = [
migrations.RenameField(
model_name='district',
old_name='District',
new_name='district',
),
]
|
normal
|
{
"blob_id": "6e56c7792d88385cc28c48a7d6dd32b9d6917c64",
"index": 2913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('adminsite', '0005_auto_20190324_0706')]\n operations = [migrations.RenameField(model_name='district', old_name=\n 'District', new_name='district')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('adminsite', '0005_auto_20190324_0706')]\n operations = [migrations.RenameField(model_name='district', old_name=\n 'District', new_name='district')]\n",
"step-5": "# Generated by Django 2.1.7 on 2019-03-24 07:08\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('adminsite', '0005_auto_20190324_0706'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='district',\n old_name='District',\n new_name='district',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.apps import AppConfig
class CfCoreConfig(AppConfig):
name = 'cf_core'
|
normal
|
{
"blob_id": "01847c9e601eae6775cd4324483740c30e344557",
"index": 382,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CfCoreConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CfCoreConfig(AppConfig):\n name = 'cf_core'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass CfCoreConfig(AppConfig):\n name = 'cf_core'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.apps import AppConfig
class PersianConfig(AppConfig):
name = 'persian'
|
normal
|
{
"blob_id": "6b0d1de4c77841f20670331db3332cf87be7ad84",
"index": 3931,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PersianConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PersianConfig(AppConfig):\n name = 'persian'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass PersianConfig(AppConfig):\n name = 'persian'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from rest_framework import serializers
from .models import SensorValue
class SensorValueSerializer(serializers.ModelSerializer):
timestamp = serializers.DateTimeField(required=False)
class Meta:
model = SensorValue
fields = ("id", "timestamp", "sensor_type", "value")
|
normal
|
{
"blob_id": "39312ec60c9ef1c9c95cf4206b6d0bbdb0aedf94",
"index": 9042,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = SensorValue\n fields = 'id', 'timestamp', 'sensor_type', 'value'\n",
"step-3": "<mask token>\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n timestamp = serializers.DateTimeField(required=False)\n\n\n class Meta:\n model = SensorValue\n fields = 'id', 'timestamp', 'sensor_type', 'value'\n",
"step-4": "from rest_framework import serializers\nfrom .models import SensorValue\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n timestamp = serializers.DateTimeField(required=False)\n\n\n class Meta:\n model = SensorValue\n fields = 'id', 'timestamp', 'sensor_type', 'value'\n",
"step-5": "from rest_framework import serializers\nfrom .models import SensorValue\n\n\nclass SensorValueSerializer(serializers.ModelSerializer):\n timestamp = serializers.DateTimeField(required=False)\n\n class Meta:\n model = SensorValue\n fields = (\"id\", \"timestamp\", \"sensor_type\", \"value\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# from django.contrib.auth import forms
# class UserRegister(forms.M):
# class Meta:
# fields = []
|
normal
|
{
"blob_id": "c1f432ff70b21064f36cf9651f8cff9c69361d5c",
"index": 9073,
"step-1": "# from django.contrib.auth import forms\n\n\n\n# class UserRegister(froms.M):\n# class Meta:\n# fields = []\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
import logging
import os
from os.path import exists, abspath, join, dirname
from os import mkdir
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["MP_NUM_THREADS"] = "1"
from smallab.runner_implementations.multiprocessing_runner import MultiprocessingRunner
from plannin_experiment import PlanningExperiment
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
logging.getLogger("smallab").propogate = False
from smallab.specification_generator import SpecificationGenerator
from smallab.runner.runner import ExperimentRunner
from smallab.runner_implementations.main_process_runner import MainRunner
from itertools import product
from sample_sim.memory_mapper_utility import map_memory
from smallab.file_locations import get_experiment_save_directory
import sys
import numpy as np
if __name__ == '__main__':
if "experiments" in os.getcwd():
os.chdir("../..")
this_dir = dirname(abspath(__file__))
for dir_name in ('.cache', '.params'):
path = join(this_dir, dir_name)
if not exists(path):
mkdir(path)
if len(sys.argv) > 1:
name = sys.argv[1]
else:
name = "IPP_POMCP"
num_seeds = 5
num_steps = 200
base_specs = {
"plot": False,
"file": ["fn:sbo"],
"seed": list(range(num_seeds)),
"objective_c": 10, # 10 for sbo, 100 for validation envs
"state_space_dimensionality": [[50,50,200]], # for fn:sbo, [[62, 70, 5]], # for validation envs
"rollout_number_goal": [num_steps * 150], # z_steps * 150
"alpha_param": 6,
"beta_param": 1,
"epsilon": 10,
"delta": 0.1,
"sample_observations": False,
"use_expected_improvement": False,
"planning_steps": [num_steps],
}
gen_baseline = base_specs.copy()
gen_baseline.update({
"plan_commitment_algorithm": "n_steps",
"plan_threshold": [1],
"rollout_allocation_method": ["fixed"],
"waste_unused_rollouts": [False],
})
specs_baseline = SpecificationGenerator().generate(gen_baseline)
gen_our_best = base_specs.copy()
gen_our_best.update({
"plan_commitment_algorithm":"tTest",
"plan_threshold":[0.05],
"rollout_allocation_method": ["beta-ugapeb"],
"waste_unused_rollouts": [True],
})
specs_our_best = SpecificationGenerator().generate(gen_our_best)
specifications = []
specifications += specs_baseline
specifications += specs_our_best
print(f"Expt {name}:\t{len(specifications)/num_seeds} specs to run, over {num_seeds} seeds")
for spec in specifications:
if spec["seed"] == 0:
print(spec)
runner = ExperimentRunner()
map_memory(base_specs["file"], base_specs["state_space_dimensionality"])
DEBUG = False
if DEBUG:
runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=True,
specification_runner=MainRunner(), use_dashboard=False, force_pickle=True, context_type="fork")
else:
gpus = 4
jobs_per_gpu = 2
resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))
runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=False,
specification_runner=MultiprocessingRunner(), context_type="fork", use_dashboard=True,
force_pickle=True)
|
normal
|
{
"blob_id": "88d8d04dd7117daed0e976f3abc52c5d7bf18434",
"index": 9334,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmpl_logger.setLevel(logging.WARNING)\n<mask token>\nif __name__ == '__main__':\n if 'experiments' in os.getcwd():\n os.chdir('../..')\n this_dir = dirname(abspath(__file__))\n for dir_name in ('.cache', '.params'):\n path = join(this_dir, dir_name)\n if not exists(path):\n mkdir(path)\n if len(sys.argv) > 1:\n name = sys.argv[1]\n else:\n name = 'IPP_POMCP'\n num_seeds = 5\n num_steps = 200\n base_specs = {'plot': False, 'file': ['fn:sbo'], 'seed': list(range(\n num_seeds)), 'objective_c': 10, 'state_space_dimensionality': [[50,\n 50, 200]], 'rollout_number_goal': [num_steps * 150], 'alpha_param':\n 6, 'beta_param': 1, 'epsilon': 10, 'delta': 0.1,\n 'sample_observations': False, 'use_expected_improvement': False,\n 'planning_steps': [num_steps]}\n gen_baseline = base_specs.copy()\n gen_baseline.update({'plan_commitment_algorithm': 'n_steps',\n 'plan_threshold': [1], 'rollout_allocation_method': ['fixed'],\n 'waste_unused_rollouts': [False]})\n specs_baseline = SpecificationGenerator().generate(gen_baseline)\n gen_our_best = base_specs.copy()\n gen_our_best.update({'plan_commitment_algorithm': 'tTest',\n 'plan_threshold': [0.05], 'rollout_allocation_method': [\n 'beta-ugapeb'], 'waste_unused_rollouts': [True]})\n specs_our_best = SpecificationGenerator().generate(gen_our_best)\n specifications = []\n specifications += specs_baseline\n specifications += specs_our_best\n print(\n f'Expt {name}:\\t{len(specifications) / num_seeds} specs to run, over {num_seeds} seeds'\n )\n for spec in specifications:\n if spec['seed'] == 0:\n print(spec)\n runner = ExperimentRunner()\n map_memory(base_specs['file'], base_specs['state_space_dimensionality'])\n DEBUG = False\n if DEBUG:\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=True, specification_runner=MainRunner(),\n use_dashboard=False, force_pickle=True, context_type='fork')\n else:\n gpus = 4\n jobs_per_gpu = 2\n resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=False, specification_runner=\n MultiprocessingRunner(), context_type='fork', use_dashboard=\n True, force_pickle=True)\n",
"step-3": "<mask token>\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['MP_NUM_THREADS'] = '1'\n<mask token>\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\nlogging.getLogger('smallab').propogate = False\n<mask token>\nif __name__ == '__main__':\n if 'experiments' in os.getcwd():\n os.chdir('../..')\n this_dir = dirname(abspath(__file__))\n for dir_name in ('.cache', '.params'):\n path = join(this_dir, dir_name)\n if not exists(path):\n mkdir(path)\n if len(sys.argv) > 1:\n name = sys.argv[1]\n else:\n name = 'IPP_POMCP'\n num_seeds = 5\n num_steps = 200\n base_specs = {'plot': False, 'file': ['fn:sbo'], 'seed': list(range(\n num_seeds)), 'objective_c': 10, 'state_space_dimensionality': [[50,\n 50, 200]], 'rollout_number_goal': [num_steps * 150], 'alpha_param':\n 6, 'beta_param': 1, 'epsilon': 10, 'delta': 0.1,\n 'sample_observations': False, 'use_expected_improvement': False,\n 'planning_steps': [num_steps]}\n gen_baseline = base_specs.copy()\n gen_baseline.update({'plan_commitment_algorithm': 'n_steps',\n 'plan_threshold': [1], 'rollout_allocation_method': ['fixed'],\n 'waste_unused_rollouts': [False]})\n specs_baseline = SpecificationGenerator().generate(gen_baseline)\n gen_our_best = base_specs.copy()\n gen_our_best.update({'plan_commitment_algorithm': 'tTest',\n 'plan_threshold': [0.05], 'rollout_allocation_method': [\n 'beta-ugapeb'], 'waste_unused_rollouts': [True]})\n specs_our_best = SpecificationGenerator().generate(gen_our_best)\n specifications = []\n specifications += specs_baseline\n specifications += specs_our_best\n print(\n f'Expt {name}:\\t{len(specifications) / num_seeds} specs to run, over {num_seeds} seeds'\n )\n for spec in specifications:\n if spec['seed'] == 0:\n print(spec)\n runner = ExperimentRunner()\n map_memory(base_specs['file'], base_specs['state_space_dimensionality'])\n DEBUG = False\n if DEBUG:\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=True, specification_runner=MainRunner(),\n use_dashboard=False, force_pickle=True, context_type='fork')\n else:\n gpus = 4\n jobs_per_gpu = 2\n resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=False, specification_runner=\n MultiprocessingRunner(), context_type='fork', use_dashboard=\n True, force_pickle=True)\n",
"step-4": "import logging\nimport os\nfrom os.path import exists, abspath, join, dirname\nfrom os import mkdir\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['MP_NUM_THREADS'] = '1'\nfrom smallab.runner_implementations.multiprocessing_runner import MultiprocessingRunner\nfrom plannin_experiment import PlanningExperiment\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\nlogging.getLogger('smallab').propogate = False\nfrom smallab.specification_generator import SpecificationGenerator\nfrom smallab.runner.runner import ExperimentRunner\nfrom smallab.runner_implementations.main_process_runner import MainRunner\nfrom itertools import product\nfrom sample_sim.memory_mapper_utility import map_memory\nfrom smallab.file_locations import get_experiment_save_directory\nimport sys\nimport numpy as np\nif __name__ == '__main__':\n if 'experiments' in os.getcwd():\n os.chdir('../..')\n this_dir = dirname(abspath(__file__))\n for dir_name in ('.cache', '.params'):\n path = join(this_dir, dir_name)\n if not exists(path):\n mkdir(path)\n if len(sys.argv) > 1:\n name = sys.argv[1]\n else:\n name = 'IPP_POMCP'\n num_seeds = 5\n num_steps = 200\n base_specs = {'plot': False, 'file': ['fn:sbo'], 'seed': list(range(\n num_seeds)), 'objective_c': 10, 'state_space_dimensionality': [[50,\n 50, 200]], 'rollout_number_goal': [num_steps * 150], 'alpha_param':\n 6, 'beta_param': 1, 'epsilon': 10, 'delta': 0.1,\n 'sample_observations': False, 'use_expected_improvement': False,\n 'planning_steps': [num_steps]}\n gen_baseline = base_specs.copy()\n gen_baseline.update({'plan_commitment_algorithm': 'n_steps',\n 'plan_threshold': [1], 'rollout_allocation_method': ['fixed'],\n 'waste_unused_rollouts': [False]})\n specs_baseline = SpecificationGenerator().generate(gen_baseline)\n gen_our_best = base_specs.copy()\n gen_our_best.update({'plan_commitment_algorithm': 'tTest',\n 'plan_threshold': [0.05], 'rollout_allocation_method': [\n 'beta-ugapeb'], 'waste_unused_rollouts': [True]})\n specs_our_best = SpecificationGenerator().generate(gen_our_best)\n specifications = []\n specifications += specs_baseline\n specifications += specs_our_best\n print(\n f'Expt {name}:\\t{len(specifications) / num_seeds} specs to run, over {num_seeds} seeds'\n )\n for spec in specifications:\n if spec['seed'] == 0:\n print(spec)\n runner = ExperimentRunner()\n map_memory(base_specs['file'], base_specs['state_space_dimensionality'])\n DEBUG = False\n if DEBUG:\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=True, specification_runner=MainRunner(),\n use_dashboard=False, force_pickle=True, context_type='fork')\n else:\n gpus = 4\n jobs_per_gpu = 2\n resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=False, specification_runner=\n MultiprocessingRunner(), context_type='fork', use_dashboard=\n True, force_pickle=True)\n",
"step-5": "import logging\nimport os\nfrom os.path import exists, abspath, join, dirname\nfrom os import mkdir\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\nos.environ[\"MP_NUM_THREADS\"] = \"1\"\n\nfrom smallab.runner_implementations.multiprocessing_runner import MultiprocessingRunner\n\nfrom plannin_experiment import PlanningExperiment\n\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\nlogging.getLogger(\"smallab\").propogate = False\n\nfrom smallab.specification_generator import SpecificationGenerator\nfrom smallab.runner.runner import ExperimentRunner\nfrom smallab.runner_implementations.main_process_runner import MainRunner\nfrom itertools import product\nfrom sample_sim.memory_mapper_utility import map_memory\nfrom smallab.file_locations import get_experiment_save_directory\nimport sys\nimport numpy as np\n\nif __name__ == '__main__':\n if \"experiments\" in os.getcwd():\n os.chdir(\"../..\")\n\n this_dir = dirname(abspath(__file__))\n for dir_name in ('.cache', '.params'):\n path = join(this_dir, dir_name)\n if not exists(path):\n mkdir(path)\n\n\n if len(sys.argv) > 1:\n name = sys.argv[1]\n else:\n name = \"IPP_POMCP\"\n num_seeds = 5\n num_steps = 200\n base_specs = {\n \"plot\": False,\n \"file\": [\"fn:sbo\"],\n \"seed\": list(range(num_seeds)),\n \"objective_c\": 10, # 10 for sbo, 100 for validation envs\n \"state_space_dimensionality\": [[50,50,200]], # for fn:sbo, [[62, 70, 5]], # for validation envs\n \"rollout_number_goal\": [num_steps * 150], # z_steps * 150\n \"alpha_param\": 6,\n \"beta_param\": 1,\n \"epsilon\": 10,\n \"delta\": 0.1,\n \"sample_observations\": False,\n \"use_expected_improvement\": False,\n \"planning_steps\": [num_steps],\n }\n\n gen_baseline = base_specs.copy()\n gen_baseline.update({\n \"plan_commitment_algorithm\": \"n_steps\",\n \"plan_threshold\": [1],\n \"rollout_allocation_method\": [\"fixed\"],\n \"waste_unused_rollouts\": [False],\n })\n specs_baseline = SpecificationGenerator().generate(gen_baseline)\n\n gen_our_best = base_specs.copy()\n gen_our_best.update({\n \"plan_commitment_algorithm\":\"tTest\",\n \"plan_threshold\":[0.05],\n \"rollout_allocation_method\": [\"beta-ugapeb\"],\n \"waste_unused_rollouts\": [True],\n })\n specs_our_best = SpecificationGenerator().generate(gen_our_best)\n\n specifications = []\n specifications += specs_baseline\n specifications += specs_our_best\n\n print(f\"Expt {name}:\\t{len(specifications)/num_seeds} specs to run, over {num_seeds} seeds\")\n for spec in specifications:\n if spec[\"seed\"] == 0:\n print(spec)\n\n runner = ExperimentRunner()\n map_memory(base_specs[\"file\"], base_specs[\"state_space_dimensionality\"])\n DEBUG = False\n\n if DEBUG:\n runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=True,\n specification_runner=MainRunner(), use_dashboard=False, force_pickle=True, context_type=\"fork\")\n else:\n gpus = 4\n jobs_per_gpu = 2\n resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))\n runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=False,\n specification_runner=MultiprocessingRunner(), context_type=\"fork\", use_dashboard=True,\n force_pickle=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#####
# Created on Oct 15 13:13:11 2019
#
# @author: inesverissimo
#
# Do pRF fit on median run, make iterative fit and save outputs
####
import os
# issue with tensorflow, try this suggestion
#NUM_PARALLEL_EXEC_UNITS = 16
#os.environ['OMP_NUM_THREADS'] = str(NUM_PARALLEL_EXEC_UNITS)
#os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
##
import json
import sys
import glob
import re
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
import scipy as sp
import scipy.stats as stats
import nibabel as nb
from nilearn.image import mean_img
from nilearn import surface
from utils import * # import script to use relevante functions
# requires pfpy be installed - preferably with python setup.py develop
from prfpy.rf import *
from prfpy.timecourse import *
from prfpy.stimulus import PRFStimulus2D
from prfpy.grid import Iso2DGaussianGridder
from prfpy.fit import Iso2DGaussianFitter
from popeye import utilities
# define participant number and open json parameter file
if len(sys.argv) < 2:
raise NameError('Please add subject number (ex:1) '
'as 1st argument in the command line!')
elif len(sys.argv) < 3:
raise NameError('Please select server being used (ex: aeneas or cartesius) '
'as 2nd argument in the command line!')
else:
# fill subject number with 0 in case user forgets
sj = str(sys.argv[1]).zfill(2)
json_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(
sys.argv[2]) == 'cartesius' else 'analysis_params.json'
with open(json_dir, 'r') as json_file:
analysis_params = json.load(json_file)
# use smoothed data?
with_smooth = analysis_params['with_smooth']
# define paths and list of files
if str(sys.argv[2]) == 'cartesius':
filepath = glob.glob(os.path.join(
analysis_params['post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
print('functional files from %s' % os.path.split(filepath[0])[0])
out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],'shift_crop')
elif str(sys.argv[2]) == 'aeneas':
print(os.path.join(
analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
filepath = glob.glob(os.path.join(
analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))
print('functional files from %s' % os.path.split(filepath[0])[0])
out_dir = os.path.join(analysis_params['pRF_outdir'],'shift_crop')
# changes depending on data used
if with_smooth == 'True':
# last part of filename to use
file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params['smooth_fwhm']
# compute median run, per hemifield
median_path = os.path.join(
out_dir, 'sub-{sj}'.format(sj=sj), 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],'iterative_fit')
else:
# last part of filename to use
file_extension = 'cropped_sg_psc.func.gii'
# compute median run, per hemifield
median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj), 'run-median','iterative_fit')
# list of functional files
filename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and run.endswith(
file_extension)]
filename.sort()
if not os.path.exists(median_path): # check if path to save median run exist
os.makedirs(median_path)
med_gii = []
for field in ['hemi-L', 'hemi-R']:
hemi = [h for h in filename if field in h]
# set name for median run (now numpy array)
med_file = os.path.join(median_path, re.sub(
'run-\d{2}_', 'run-median_', os.path.split(hemi[0])[-1]))
# if file doesn't exist
if not os.path.exists(med_file):
med_gii.append(median_gii(hemi, median_path)) # create it
print('computed %s' % (med_gii))
else:
med_gii.append(med_file)
print('median file %s already exists, skipping' % (med_gii))
# create/load design matrix
png_path = '/home/inesv/SB-ref/scripts/imgs/' if str(
sys.argv[2]) == 'cartesius' else analysis_params['imgs_dir']
png_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]
png_filename.sort()
dm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')
#if not os.path.exists(dm_filename): # if not exists
screenshot2DM(png_filename, 0.1,
analysis_params['screenRes'], dm_filename,dm_shape = 'square') # create it
print('computed %s' % (dm_filename))
#else:
# print('loading %s' % dm_filename)
prf_dm = np.load(dm_filename)
prf_dm = prf_dm.T # then it'll be (x, y, t)
# change DM to see if fit is better like that
# do new one which is average of every 2 TRs
prf_dm = shift_DM(prf_dm)
prf_dm = prf_dm[:,:,analysis_params['crop_pRF_TR']:] # crop DM because functional data also cropped now
# define model params
fit_model = analysis_params["fit_model"]
TR = analysis_params["TR"]
hrf = utilities.spm_hrf(0,TR)
# make stimulus object, which takes an input design matrix and sets up its real-world dimensions
prf_stim = PRFStimulus2D(screen_size_cm=analysis_params["screen_width"],
screen_distance_cm=analysis_params["screen_distance"],
design_matrix=prf_dm,
TR=TR)
# sets up stimulus and hrf for this gridder
gg = Iso2DGaussianGridder(stimulus=prf_stim,
hrf=hrf,
filter_predictions=False,
window_length=analysis_params["sg_filt_window_length"],
polyorder=analysis_params["sg_filt_polyorder"],
highpass=False,
add_mean=False)
# set grid parameters
grid_nr = analysis_params["grid_steps"]
sizes = analysis_params["max_size"] * np.linspace(np.sqrt(analysis_params["min_size"]/analysis_params["max_size"]),1,grid_nr)**2
eccs = analysis_params["max_eccen"] * np.linspace(np.sqrt(analysis_params["min_eccen"]/analysis_params["max_eccen"]),1,grid_nr)**2
polars = np.linspace(0, 2*np.pi, grid_nr)
for gii_file in med_gii:
print('loading data from %s' % gii_file)
data = np.array(surface.load_surf_data(gii_file))
print('data array with shape %s'%str(data.shape))
gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)
#filename for the numpy array with the estimates of the grid fit
grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')
if not os.path.isfile(grid_estimates_filename): # if estimates file doesn't exist
print('%s not found, fitting grid'%grid_estimates_filename)
# do grid fit and save estimates
gf.grid_fit(ecc_grid=eccs,
polar_grid=polars,
size_grid=sizes)
np.savez(grid_estimates_filename,
x = gf.gridsearch_params[..., 0],
y = gf.gridsearch_params[..., 1],
size = gf.gridsearch_params[..., 2],
betas = gf.gridsearch_params[...,3],
baseline = gf.gridsearch_params[..., 4],
ns = gf.gridsearch_params[..., 5],
r2 = gf.gridsearch_params[..., 6])
loaded_gf_pars = np.load(grid_estimates_filename)
gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x', 'y', 'size', 'betas', 'baseline','ns','r2']])
gf.gridsearch_params = np.transpose(gf.gridsearch_params)
# do iterative fit
iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')
if not os.path.isfile(iterative_out): # if estimates file doesn't exist
print('doing iterative fit')
gf.iterative_fit(rsq_threshold=0.1, verbose=False)
np.savez(iterative_out,
it_output=gf.iterative_search_params)
else:
print('%s already exists'%iterative_out)
## do iterative fit again, now with css, n=1 (isn't that just gaussian?)
#print('doing iterative fit with css ')
#gf.fit_css = True
#gf.iterative_fit(rsq_threshold=0.1, verbose=False)
#iterative_css_out = gii_file.replace('.func.gii', '_iterative_css_output.npz')
#np.savez(iterative_css_out,
# it_output=gf.iterative_search_params)
|
normal
|
{
"blob_id": "d9156e240d49e0a6570a5bc2315f95a7a670fd4f",
"index": 6327,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) < 2:\n raise NameError(\n 'Please add subject number (ex:1) as 1st argument in the command line!'\n )\nelif len(sys.argv) < 3:\n raise NameError(\n 'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'\n )\nelse:\n sj = str(sys.argv[1]).zfill(2)\n<mask token>\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\n<mask token>\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),\n '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],\n 'shift_crop')\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',\n 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')\nif with_smooth == 'True':\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[\n 'smooth_fwhm']\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],\n 'iterative_fit')\nelse:\n file_extension = 'cropped_sg_psc.func.gii'\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'iterative_fit')\n<mask token>\nfilename.sort()\nif not os.path.exists(median_path):\n os.makedirs(median_path)\n<mask token>\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n med_file = os.path.join(median_path, re.sub('run-\\\\d{2}_',\n 'run-median_', os.path.split(hemi[0])[-1]))\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path))\n print('computed %s' % med_gii)\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % med_gii)\n<mask token>\npng_filename.sort()\n<mask token>\nscreenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,\n dm_shape='square')\nprint('computed %s' % dm_filename)\n<mask token>\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s' % str(data.shape))\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n if not os.path.isfile(grid_estimates_filename):\n print('%s not found, fitting grid' % grid_estimates_filename)\n gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)\n np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y\n =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2\n ], betas=gf.gridsearch_params[..., 3], baseline=gf.\n gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=\n gf.gridsearch_params[..., 6])\n loaded_gf_pars = np.load(grid_estimates_filename)\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',\n 'y', 'size', 'betas', 'baseline', 'ns', 'r2']])\n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n if not os.path.isfile(iterative_out):\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n np.savez(iterative_out, 
it_output=gf.iterative_search_params)\n else:\n print('%s already exists' % iterative_out)\n",
"step-3": "<mask token>\nif len(sys.argv) < 2:\n raise NameError(\n 'Please add subject number (ex:1) as 1st argument in the command line!'\n )\nelif len(sys.argv) < 3:\n raise NameError(\n 'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'\n )\nelse:\n sj = str(sys.argv[1]).zfill(2)\njson_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(sys.argv[2]\n ) == 'cartesius' else 'analysis_params.json'\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\nwith_smooth = analysis_params['with_smooth']\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),\n '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],\n 'shift_crop')\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',\n 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')\nif with_smooth == 'True':\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[\n 'smooth_fwhm']\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],\n 'iterative_fit')\nelse:\n file_extension = 'cropped_sg_psc.func.gii'\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'iterative_fit')\nfilename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and\n run.endswith(file_extension)]\nfilename.sort()\nif not os.path.exists(median_path):\n os.makedirs(median_path)\nmed_gii = []\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n med_file = os.path.join(median_path, re.sub('run-\\\\d{2}_',\n 'run-median_', os.path.split(hemi[0])[-1]))\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path))\n print('computed %s' % med_gii)\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % med_gii)\npng_path = '/home/inesv/SB-ref/scripts/imgs/' if str(sys.argv[2]\n ) == 'cartesius' else analysis_params['imgs_dir']\npng_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]\npng_filename.sort()\ndm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')\nscreenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,\n dm_shape='square')\nprint('computed %s' % dm_filename)\nprf_dm = np.load(dm_filename)\nprf_dm = prf_dm.T\nprf_dm = shift_DM(prf_dm)\nprf_dm = prf_dm[:, :, analysis_params['crop_pRF_TR']:]\nfit_model = analysis_params['fit_model']\nTR = analysis_params['TR']\nhrf = utilities.spm_hrf(0, TR)\nprf_stim = PRFStimulus2D(screen_size_cm=analysis_params['screen_width'],\n screen_distance_cm=analysis_params['screen_distance'], design_matrix=\n prf_dm, TR=TR)\ngg = Iso2DGaussianGridder(stimulus=prf_stim, hrf=hrf, filter_predictions=\n False, window_length=analysis_params['sg_filt_window_length'],\n polyorder=analysis_params['sg_filt_polyorder'], highpass=False,\n add_mean=False)\ngrid_nr = analysis_params['grid_steps']\nsizes = analysis_params['max_size'] * np.linspace(np.sqrt(analysis_params[\n 'min_size'] / analysis_params['max_size']), 1, grid_nr) ** 2\neccs = 
analysis_params['max_eccen'] * np.linspace(np.sqrt(analysis_params[\n 'min_eccen'] / analysis_params['max_eccen']), 1, grid_nr) ** 2\npolars = np.linspace(0, 2 * np.pi, grid_nr)\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s' % str(data.shape))\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n if not os.path.isfile(grid_estimates_filename):\n print('%s not found, fitting grid' % grid_estimates_filename)\n gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)\n np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y\n =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2\n ], betas=gf.gridsearch_params[..., 3], baseline=gf.\n gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=\n gf.gridsearch_params[..., 6])\n loaded_gf_pars = np.load(grid_estimates_filename)\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',\n 'y', 'size', 'betas', 'baseline', 'ns', 'r2']])\n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n if not os.path.isfile(iterative_out):\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n np.savez(iterative_out, it_output=gf.iterative_search_params)\n else:\n print('%s already exists' % iterative_out)\n",
"step-4": "import os\nimport json\nimport sys\nimport glob\nimport re\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\nimport scipy as sp\nimport scipy.stats as stats\nimport nibabel as nb\nfrom nilearn.image import mean_img\nfrom nilearn import surface\nfrom utils import *\nfrom prfpy.rf import *\nfrom prfpy.timecourse import *\nfrom prfpy.stimulus import PRFStimulus2D\nfrom prfpy.grid import Iso2DGaussianGridder\nfrom prfpy.fit import Iso2DGaussianFitter\nfrom popeye import utilities\nif len(sys.argv) < 2:\n raise NameError(\n 'Please add subject number (ex:1) as 1st argument in the command line!'\n )\nelif len(sys.argv) < 3:\n raise NameError(\n 'Please select server being used (ex: aeneas or cartesius) as 2nd argument in the command line!'\n )\nelse:\n sj = str(sys.argv[1]).zfill(2)\njson_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(sys.argv[2]\n ) == 'cartesius' else 'analysis_params.json'\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\nwith_smooth = analysis_params['with_smooth']\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj),\n '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],\n 'shift_crop')\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(analysis_params['post_fmriprep_outdir'], 'prf',\n 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(analysis_params[\n 'post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'], 'shift_crop')\nif with_smooth == 'True':\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params[\n 'smooth_fwhm']\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],\n 'iterative_fit')\nelse:\n file_extension = 'cropped_sg_psc.func.gii'\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj),\n 'run-median', 'iterative_fit')\nfilename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and\n run.endswith(file_extension)]\nfilename.sort()\nif not os.path.exists(median_path):\n os.makedirs(median_path)\nmed_gii = []\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n med_file = os.path.join(median_path, re.sub('run-\\\\d{2}_',\n 'run-median_', os.path.split(hemi[0])[-1]))\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path))\n print('computed %s' % med_gii)\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % med_gii)\npng_path = '/home/inesv/SB-ref/scripts/imgs/' if str(sys.argv[2]\n ) == 'cartesius' else analysis_params['imgs_dir']\npng_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]\npng_filename.sort()\ndm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')\nscreenshot2DM(png_filename, 0.1, analysis_params['screenRes'], dm_filename,\n dm_shape='square')\nprint('computed %s' % dm_filename)\nprf_dm = np.load(dm_filename)\nprf_dm = prf_dm.T\nprf_dm = shift_DM(prf_dm)\nprf_dm = prf_dm[:, :, analysis_params['crop_pRF_TR']:]\nfit_model = analysis_params['fit_model']\nTR = analysis_params['TR']\nhrf = utilities.spm_hrf(0, TR)\nprf_stim = PRFStimulus2D(screen_size_cm=analysis_params['screen_width'],\n 
screen_distance_cm=analysis_params['screen_distance'], design_matrix=\n prf_dm, TR=TR)\ngg = Iso2DGaussianGridder(stimulus=prf_stim, hrf=hrf, filter_predictions=\n False, window_length=analysis_params['sg_filt_window_length'],\n polyorder=analysis_params['sg_filt_polyorder'], highpass=False,\n add_mean=False)\ngrid_nr = analysis_params['grid_steps']\nsizes = analysis_params['max_size'] * np.linspace(np.sqrt(analysis_params[\n 'min_size'] / analysis_params['max_size']), 1, grid_nr) ** 2\neccs = analysis_params['max_eccen'] * np.linspace(np.sqrt(analysis_params[\n 'min_eccen'] / analysis_params['max_eccen']), 1, grid_nr) ** 2\npolars = np.linspace(0, 2 * np.pi, grid_nr)\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s' % str(data.shape))\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n if not os.path.isfile(grid_estimates_filename):\n print('%s not found, fitting grid' % grid_estimates_filename)\n gf.grid_fit(ecc_grid=eccs, polar_grid=polars, size_grid=sizes)\n np.savez(grid_estimates_filename, x=gf.gridsearch_params[..., 0], y\n =gf.gridsearch_params[..., 1], size=gf.gridsearch_params[..., 2\n ], betas=gf.gridsearch_params[..., 3], baseline=gf.\n gridsearch_params[..., 4], ns=gf.gridsearch_params[..., 5], r2=\n gf.gridsearch_params[..., 6])\n loaded_gf_pars = np.load(grid_estimates_filename)\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x',\n 'y', 'size', 'betas', 'baseline', 'ns', 'r2']])\n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n if not os.path.isfile(iterative_out):\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n np.savez(iterative_out, it_output=gf.iterative_search_params)\n else:\n print('%s already exists' % iterative_out)\n",
"step-5": "\n#####\n# Created on Oct 15 13:13:11 2019\n#\n# @author: inesverissimo\n#\n# Do pRF fit on median run, make iterative fit and save outputs\n####\n\nimport os\n\n# issue with tensorflow, try this suggestion\n#NUM_PARALLEL_EXEC_UNITS = 16\n#os.environ['OMP_NUM_THREADS'] = str(NUM_PARALLEL_EXEC_UNITS)\n#os.environ[\"KMP_AFFINITY\"] = \"granularity=fine,verbose,compact,1,0\"\n##\n\nimport json\nimport sys\nimport glob\nimport re\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\n\nimport numpy as np\nimport scipy as sp\nimport scipy.stats as stats\nimport nibabel as nb\nfrom nilearn.image import mean_img\n\nfrom nilearn import surface\n\nfrom utils import * # import script to use relevante functions\n\n# requires pfpy be installed - preferably with python setup.py develop\nfrom prfpy.rf import *\nfrom prfpy.timecourse import *\nfrom prfpy.stimulus import PRFStimulus2D\nfrom prfpy.grid import Iso2DGaussianGridder\nfrom prfpy.fit import Iso2DGaussianFitter\n\nfrom popeye import utilities \n\n# define participant number and open json parameter file\nif len(sys.argv) < 2:\n raise NameError('Please add subject number (ex:1) '\n 'as 1st argument in the command line!')\n\nelif len(sys.argv) < 3:\n raise NameError('Please select server being used (ex: aeneas or cartesius) '\n 'as 2nd argument in the command line!')\n\nelse:\n # fill subject number with 0 in case user forgets\n sj = str(sys.argv[1]).zfill(2)\n\n\njson_dir = '/home/inesv/SB-ref/scripts/analysis_params.json' if str(\n sys.argv[2]) == 'cartesius' else 'analysis_params.json'\n\nwith open(json_dir, 'r') as json_file:\n analysis_params = json.load(json_file)\n\n# use smoothed data?\nwith_smooth = analysis_params['with_smooth']\n\n\n# define paths and list of files\nif str(sys.argv[2]) == 'cartesius':\n filepath = glob.glob(os.path.join(\n analysis_params['post_fmriprep_outdir_cartesius'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir_cartesius'],'shift_crop')\n\nelif str(sys.argv[2]) == 'aeneas':\n print(os.path.join(\n analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n filepath = glob.glob(os.path.join(\n analysis_params['post_fmriprep_outdir'], 'prf', 'sub-{sj}'.format(sj=sj), '*'))\n print('functional files from %s' % os.path.split(filepath[0])[0])\n out_dir = os.path.join(analysis_params['pRF_outdir'],'shift_crop')\n\n# changes depending on data used\nif with_smooth == 'True':\n # last part of filename to use\n file_extension = 'cropped_sg_psc_smooth%d.func.gii' % analysis_params['smooth_fwhm']\n # compute median run, per hemifield\n median_path = os.path.join(\n out_dir, 'sub-{sj}'.format(sj=sj), 'run-median', 'smooth%d' % analysis_params['smooth_fwhm'],'iterative_fit')\nelse:\n # last part of filename to use\n file_extension = 'cropped_sg_psc.func.gii'\n # compute median run, per hemifield\n median_path = os.path.join(out_dir, 'sub-{sj}'.format(sj=sj), 'run-median','iterative_fit')\n\n# list of functional files\nfilename = [run for run in filepath if 'prf' in run and 'fsaverage' in run and run.endswith(\n file_extension)]\nfilename.sort()\nif not os.path.exists(median_path): # check if path to save median run exist\n os.makedirs(median_path)\n\n\nmed_gii = []\nfor field in ['hemi-L', 'hemi-R']:\n hemi = [h for h in filename if field in h]\n\n # set name for median run (now numpy array)\n med_file = os.path.join(median_path, re.sub(\n 'run-\\d{2}_', 
'run-median_', os.path.split(hemi[0])[-1]))\n # if file doesn't exist\n if not os.path.exists(med_file):\n med_gii.append(median_gii(hemi, median_path)) # create it\n print('computed %s' % (med_gii))\n else:\n med_gii.append(med_file)\n print('median file %s already exists, skipping' % (med_gii))\n\n\n# create/load design matrix\npng_path = '/home/inesv/SB-ref/scripts/imgs/' if str(\n sys.argv[2]) == 'cartesius' else analysis_params['imgs_dir']\npng_filename = [os.path.join(png_path, png) for png in os.listdir(png_path)]\npng_filename.sort()\n\ndm_filename = os.path.join(os.getcwd(), 'prf_dm_square.npy')\n\n#if not os.path.exists(dm_filename): # if not exists\nscreenshot2DM(png_filename, 0.1,\n analysis_params['screenRes'], dm_filename,dm_shape = 'square') # create it\nprint('computed %s' % (dm_filename))\n\n#else:\n# print('loading %s' % dm_filename)\n\nprf_dm = np.load(dm_filename)\nprf_dm = prf_dm.T # then it'll be (x, y, t)\n\n# change DM to see if fit is better like that\n# do new one which is average of every 2 TRs\n\nprf_dm = shift_DM(prf_dm)\n\nprf_dm = prf_dm[:,:,analysis_params['crop_pRF_TR']:] # crop DM because functional data also cropped now\n\n# define model params\nfit_model = analysis_params[\"fit_model\"]\n\nTR = analysis_params[\"TR\"]\n\nhrf = utilities.spm_hrf(0,TR)\n\n# make stimulus object, which takes an input design matrix and sets up its real-world dimensions\nprf_stim = PRFStimulus2D(screen_size_cm=analysis_params[\"screen_width\"], \n screen_distance_cm=analysis_params[\"screen_distance\"], \n design_matrix=prf_dm, \n TR=TR)\n\n# sets up stimulus and hrf for this gridder\ngg = Iso2DGaussianGridder(stimulus=prf_stim,\n hrf=hrf,\n filter_predictions=False,\n window_length=analysis_params[\"sg_filt_window_length\"],\n polyorder=analysis_params[\"sg_filt_polyorder\"],\n highpass=False,\n add_mean=False)\n\n# set grid parameters\ngrid_nr = analysis_params[\"grid_steps\"]\nsizes = analysis_params[\"max_size\"] * np.linspace(np.sqrt(analysis_params[\"min_size\"]/analysis_params[\"max_size\"]),1,grid_nr)**2\neccs = analysis_params[\"max_eccen\"] * np.linspace(np.sqrt(analysis_params[\"min_eccen\"]/analysis_params[\"max_eccen\"]),1,grid_nr)**2\npolars = np.linspace(0, 2*np.pi, grid_nr)\n\nfor gii_file in med_gii:\n print('loading data from %s' % gii_file)\n data = np.array(surface.load_surf_data(gii_file))\n print('data array with shape %s'%str(data.shape))\n\n gf = Iso2DGaussianFitter(data=data, gridder=gg, n_jobs=16, fit_css=False)\n\n #filename for the numpy array with the estimates of the grid fit\n grid_estimates_filename = gii_file.replace('.func.gii', '_estimates.npz')\n\n if not os.path.isfile(grid_estimates_filename): # if estimates file doesn't exist\n print('%s not found, fitting grid'%grid_estimates_filename)\n # do grid fit and save estimates\n gf.grid_fit(ecc_grid=eccs,\n polar_grid=polars,\n size_grid=sizes)\n\n np.savez(grid_estimates_filename,\n x = gf.gridsearch_params[..., 0],\n y = gf.gridsearch_params[..., 1],\n size = gf.gridsearch_params[..., 2],\n betas = gf.gridsearch_params[...,3],\n baseline = gf.gridsearch_params[..., 4],\n ns = gf.gridsearch_params[..., 5],\n r2 = gf.gridsearch_params[..., 6])\n\n\n loaded_gf_pars = np.load(grid_estimates_filename)\n\n gf.gridsearch_params = np.array([loaded_gf_pars[par] for par in ['x', 'y', 'size', 'betas', 'baseline','ns','r2']]) \n gf.gridsearch_params = np.transpose(gf.gridsearch_params)\n\n # do iterative fit\n iterative_out = gii_file.replace('.func.gii', '_iterative_output.npz')\n\n if not 
os.path.isfile(iterative_out): # if estimates file doesn't exist\n print('doing iterative fit')\n gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n\n \n np.savez(iterative_out,\n it_output=gf.iterative_search_params)\n else:\n print('%s already exists'%iterative_out)\n\n ## do iterative fit again, now with css, n=1 (isn't that just gaussian?)\n #print('doing iterative fit with css ')\n #gf.fit_css = True\n #gf.iterative_fit(rsq_threshold=0.1, verbose=False)\n\n #iterative_css_out = gii_file.replace('.func.gii', '_iterative_css_output.npz')\n #np.savez(iterative_css_out,\n # it_output=gf.iterative_search_params)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy
import cv2
from keras.models import model_from_json
from keras.layers import Dense
from keras.utils import np_utils
import os
from keras.optimizers import SGD, Adam
numpy.random.seed(42)
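# Rebuild the network from its JSON description and load the trained weights.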
file_json = open('model.json', 'r')
model_json = file_json.read()
file_json.close()
model = model_from_json(model_json)
model.load_weights('weights.h5')
print('Model loaded')
sgd = SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
# for i in range(10):
# img = cv2.imread(str(i) + '.png', 0)
# img = cv2.resize(img, (28, 28))
# for i in range(28):
# for j in range(28):
# img[i][j] = abs(img[i][j] - 255)
# print('%4.f' % img[i][j], end='')
# print()
# print()
# print()
# print()
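# Classify the digit images 0.png..9.png: invert to white-on-black, scale to [0, 1], and predict.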
for i in range(10):
img = cv2.imread(str(i) + '.png', 0)
img = cv2.resize(img, (28, 28))
for x in range(28):
for y in range(28):
img[x][y] = abs(img[x][y] - 255)
img = img.astype('float32')
img /= numpy.max(img)
img = numpy.array([img[numpy.newaxis, :, :]])
a = model.predict(img, batch_size=64)
print(i, numpy.argmax(a, axis=None, out=None))
|
normal
|
{
"blob_id": "05021c3b39a0df07ca3d7d1c3ff9d47be6723131",
"index": 4084,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnumpy.random.seed(42)\n<mask token>\nfile_json.close()\n<mask token>\nmodel.load_weights('weights.h5')\nprint('Model loaded')\n<mask token>\nmodel.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[\n 'accuracy'])\nfor i in range(10):\n img = cv2.imread(str(i) + '.png', 0)\n img = cv2.resize(img, (28, 28))\n for x in range(28):\n for y in range(28):\n img[x][y] = abs(img[x][y] - 255)\n img = img.astype('float32')\n img /= numpy.max(img)\n img = numpy.array([img[numpy.newaxis, :, :]])\n a = model.predict(img, batch_size=64)\n print(i, numpy.argmax(a, axis=None, out=None))\n",
"step-3": "<mask token>\nnumpy.random.seed(42)\nfile_json = open('model.json', 'r')\nmodel_json = file_json.read()\nfile_json.close()\nmodel = model_from_json(model_json)\nmodel.load_weights('weights.h5')\nprint('Model loaded')\nsgd = SGD(lr=0.01, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[\n 'accuracy'])\nfor i in range(10):\n img = cv2.imread(str(i) + '.png', 0)\n img = cv2.resize(img, (28, 28))\n for x in range(28):\n for y in range(28):\n img[x][y] = abs(img[x][y] - 255)\n img = img.astype('float32')\n img /= numpy.max(img)\n img = numpy.array([img[numpy.newaxis, :, :]])\n a = model.predict(img, batch_size=64)\n print(i, numpy.argmax(a, axis=None, out=None))\n",
"step-4": "import numpy\nimport cv2\nfrom keras.models import model_from_json\nfrom keras.layers import Dense\nfrom keras.utils import np_utils\nimport os\nfrom keras.optimizers import SGD, Adam\nnumpy.random.seed(42)\nfile_json = open('model.json', 'r')\nmodel_json = file_json.read()\nfile_json.close()\nmodel = model_from_json(model_json)\nmodel.load_weights('weights.h5')\nprint('Model loaded')\nsgd = SGD(lr=0.01, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=[\n 'accuracy'])\nfor i in range(10):\n img = cv2.imread(str(i) + '.png', 0)\n img = cv2.resize(img, (28, 28))\n for x in range(28):\n for y in range(28):\n img[x][y] = abs(img[x][y] - 255)\n img = img.astype('float32')\n img /= numpy.max(img)\n img = numpy.array([img[numpy.newaxis, :, :]])\n a = model.predict(img, batch_size=64)\n print(i, numpy.argmax(a, axis=None, out=None))\n",
"step-5": "import numpy\nimport cv2\nfrom keras.models import model_from_json\nfrom keras.layers import Dense\nfrom keras.utils import np_utils\nimport os\nfrom keras.optimizers import SGD, Adam\n\nnumpy.random.seed(42)\nfile_json = open('model.json', \"r\")\nmodel_json = file_json.read()\nfile_json.close()\nmodel = model_from_json(model_json)\nmodel.load_weights('weights.h5')\nprint('Model loaded')\nsgd = SGD(lr=0.01, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])\n\n# for i in range(10):\n# img = cv2.imread(str(i) + '.png', 0)\n# img = cv2.resize(img, (28, 28))\n# for i in range(28):\n# for j in range(28):\n# img[i][j] = abs(img[i][j] - 255)\n# print('%4.f' % img[i][j], end='')\n# print()\n# print()\n# print()\n# print()\n\nfor i in range(10):\n\timg = cv2.imread(str(i) + '.png', 0)\n\timg = cv2.resize(img, (28, 28))\n\tfor x in range(28):\n\t\tfor y in range(28):\n\t\t\timg[x][y] = abs(img[x][y] - 255)\n\timg = img.astype('float32')\n\timg /= numpy.max(img)\n\timg = numpy.array([img[numpy.newaxis, :, :]])\n\ta = model.predict(img, batch_size=64)\n\tprint(i, numpy.argmax(a, axis=None, out=None))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from phylo_utils.data import fixed_equal_nucleotide_frequencies
from phylo_utils.substitution_models.tn93 import TN93
class K80(TN93):
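    """Kimura (1980) two-parameter model: TN93 with equal base frequencies and a single transition/transversion ratio kappa."""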
_name = 'K80'
_freqs = fixed_equal_nucleotide_frequencies.copy()
def __init__(self, kappa, scale_q=True):
super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q
)
|
normal
|
{
"blob_id": "0f0595793e98187c6aaf5b1f4b59affb06bb598e",
"index": 3159,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass K80(TN93):\n <mask token>\n <mask token>\n\n def __init__(self, kappa, scale_q=True):\n super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q\n )\n",
"step-3": "<mask token>\n\n\nclass K80(TN93):\n _name = 'K80'\n _freqs = fixed_equal_nucleotide_frequencies.copy()\n\n def __init__(self, kappa, scale_q=True):\n super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q\n )\n",
"step-4": "from phylo_utils.data import fixed_equal_nucleotide_frequencies\nfrom phylo_utils.substitution_models.tn93 import TN93\n\n\nclass K80(TN93):\n _name = 'K80'\n _freqs = fixed_equal_nucleotide_frequencies.copy()\n\n def __init__(self, kappa, scale_q=True):\n super(K80, self).__init__(kappa, kappa, 1, self._freqs, scale_q=scale_q\n )\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
# import sys
# sys.stdin = open("농작물input.txt")
T = int(input())
for n in range(1, T+1):
N = int(input())
arr = [list(map(int, list(input()))) for _ in range(N)]
# print(arr)
a = N//2
b = N//2
result = 0
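    # Sum a diamond: the column window [a, b] widens by one per row down to the middle row, then narrows.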
for i in range(N):
for j in range(a, b+1):
result += arr[i][j]
print(result)
if i < N//2:
a += -1
b += 1
else:
a += 1
b += -1
print("#{0} {1}".format(n, result))
|
normal
|
{
"blob_id": "2236591b3a30f51442beb20c6c43cc9e6cd921d2",
"index": 7530,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor n in range(1, T + 1):\n N = int(input())\n arr = [list(map(int, list(input()))) for _ in range(N)]\n a = N // 2\n b = N // 2\n result = 0\n for i in range(N):\n for j in range(a, b + 1):\n result += arr[i][j]\n print(result)\n if i < N // 2:\n a += -1\n b += 1\n else:\n a += 1\n b += -1\n print('#{0} {1}'.format(n, result))\n",
"step-3": "T = int(input())\nfor n in range(1, T + 1):\n N = int(input())\n arr = [list(map(int, list(input()))) for _ in range(N)]\n a = N // 2\n b = N // 2\n result = 0\n for i in range(N):\n for j in range(a, b + 1):\n result += arr[i][j]\n print(result)\n if i < N // 2:\n a += -1\n b += 1\n else:\n a += 1\n b += -1\n print('#{0} {1}'.format(n, result))\n",
"step-4": "# import sys\n# sys.stdin = open(\"농작물input.txt\")\n\nT = int(input())\n\nfor n in range(1, T+1):\n N = int(input())\n arr = [list(map(int, list(input()))) for _ in range(N)]\n # print(arr)\n a = N//2\n b = N//2\n result = 0\n for i in range(N):\n for j in range(a, b+1):\n result += arr[i][j]\n print(result)\n if i < N//2:\n a += -1\n b += 1\n else:\n a += 1\n b += -1\n\n print(\"#{0} {1}\".format(n, result))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Importing the random library for random choice.
import random
getnum = int(input("Pick a number greater than 7: "))
# Error checking.
if (getnum < 7):
print("Error 205: Too little characters entered")
print("Run again using python passwordgenerator.py, or click the run button on your IDE.")
exit()
# Characters the password may contain: letters, digits, and symbols.
lista = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0','#', '@', '!', '%','^', '//', '\\']
# Main function takes two params: lista and getnum.
def main(lista, getnum):
password = ''
for i in range(0, getnum):
passchar = random.choice(lista)
password = password + passchar
print(password)
passwordagain()
# Ask whether to generate another password.
def passwordagain():
again = input("Do you want to generate another password(y/n)?: ")
if (again == 'y'):
main(lista,getnum)
elif(again == 'n'):
exit()
else:
print("Sorry, couldn't understand what you were saying.")
passwordagain()
main(lista, getnum)
|
normal
|
{
"blob_id": "c40bb410ad68808c2e0cc636820ec6a2ec2739b8",
"index": 4053,
"step-1": "<mask token>\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\n<mask token>\n",
"step-2": "<mask token>\nif getnum < 7:\n print('Error 205: Too little characters entered')\n print(\n 'Run again using python passwordgenerator.py, or click the run button on your IDE.'\n )\n exit()\n<mask token>\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\nmain(lista, getnum)\n",
"step-3": "<mask token>\ngetnum = int(input('Pick a number greater than 7: '))\nif getnum < 7:\n print('Error 205: Too little characters entered')\n print(\n 'Run again using python passwordgenerator.py, or click the run button on your IDE.'\n )\n exit()\nlista = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1',\n '2', '3', '4', '5', '6', '7', '8', '9', '0', '#', '@', '!', '%', '^',\n '//', '\\\\']\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\nmain(lista, getnum)\n",
"step-4": "import random\ngetnum = int(input('Pick a number greater than 7: '))\nif getnum < 7:\n print('Error 205: Too little characters entered')\n print(\n 'Run again using python passwordgenerator.py, or click the run button on your IDE.'\n )\n exit()\nlista = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '1',\n '2', '3', '4', '5', '6', '7', '8', '9', '0', '#', '@', '!', '%', '^',\n '//', '\\\\']\n\n\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n\n\ndef passwordagain():\n again = input('Do you want to generate another password(y/n)?: ')\n if again == 'y':\n main(lista, getnum)\n elif again == 'n':\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\n\n\nmain(lista, getnum)\n",
"step-5": "# Importing the random library for random choice.\nimport random\ngetnum = int(input(\"Pick a number greater than 7: \"))\n# Error checking.\nif (getnum < 7):\n print(\"Error 205: Too little characters entered\")\n print(\"Run again using python passwordgenerator.py, or click the run button on your IDE.\")\n exit()\n# A list of random things.\nlista = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0','#', '@', '!', '%','^', '//', '\\\\']\n# Main function takes two params, lista and get num.\ndef main(lista, getnum):\n password = ''\n for i in range(0, getnum):\n passchar = random.choice(lista)\n password = password + passchar\n print(password)\n passwordagain()\n#Password again.\ndef passwordagain():\n again = input(\"Do you want to generate another password(y/n)?: \")\n if (again == 'y'):\n main(lista,getnum)\n elif(again == 'n'):\n exit()\n else:\n print(\"Sorry, couldn't understand what you were saying.\")\n passwordagain()\nmain(lista, getnum)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import csv, requests
from bs4 import BeautifulSoup
items = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,
# name, pricePerOne, subCategory, subKey, totalTradeCount,
# mainLabel, subLabel, description
mpItems = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,
# name, pricePerOne, subCategory, subKey, totalTradeCount
def openCsv():
"""Open csv file."""
csvFile = 'BDO_app/modules/priceCheck/itemID.csv'
return csvFile
def importAll():
"""Import all the items from csv file."""
csvFile = openCsv()
items = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,
# name, pricePerOne, subCategory, subKey, totalTradeCount,
# mainLabel, subLabel, description
with open(csvFile) as i:
readItem = csv.reader(i)
itemRow = next(readItem)
for row in readItem:
items.append(row)
return items
def priceCheck(a, b, c):
"""Read one item from the link."""
mpItem = []
checkedItem = []
url = 'http://omegapepega.com/' + a + '/' + b + '/' + c
# url = http://omegapepega.com/region/mainKey/subKey
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
results = soup.find(text=True)
splittedText = results.rsplit('\n')
for line in splittedText:
a = line.rstrip()
mpItem.append(a.lstrip())
mpItem.pop(0)
mpItem.pop(-1)
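    # Parse each "key: value" line: drop a trailing comma if present and keep only the value.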
for i in mpItem:
try:
s = i.index(':')
k = (i[:s])
if i.endswith(','):
v = (i[s+1:-1])
else: v = (i[s+1:])
checkedItem.append(v.strip())
except:
continue
return checkedItem
|
normal
|
{
"blob_id": "47ad08bb153801f592d90c48d62338d0f7703899",
"index": 2788,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef openCsv():\n \"\"\"Open csv file.\"\"\"\n csvFile = 'BDO_app/modules/priceCheck/itemID.csv'\n return csvFile\n\n\ndef importAll():\n \"\"\"Import all the items from csv file.\"\"\"\n csvFile = openCsv()\n items = []\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n return items\n\n\ndef priceCheck(a, b, c):\n \"\"\"Read one item from the link.\"\"\"\n mpItem = []\n checkedItem = []\n url = 'http://omegapepega.com/' + a + '/' + b + '/' + c\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n results = soup.find(text=True)\n splittedText = results.rsplit('\\n')\n for line in splittedText:\n a = line.rstrip()\n mpItem.append(a.lstrip())\n mpItem.pop(0)\n mpItem.pop(-1)\n for i in mpItem:\n try:\n s = i.index(':')\n k = i[:s]\n if i.endswith(','):\n v = i[s + 1:-1]\n else:\n v = i[s + 1:]\n checkedItem.append(v.strip())\n except:\n continue\n return checkedItem\n",
"step-3": "<mask token>\nitems = []\nmpItems = []\n\n\ndef openCsv():\n \"\"\"Open csv file.\"\"\"\n csvFile = 'BDO_app/modules/priceCheck/itemID.csv'\n return csvFile\n\n\ndef importAll():\n \"\"\"Import all the items from csv file.\"\"\"\n csvFile = openCsv()\n items = []\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n return items\n\n\ndef priceCheck(a, b, c):\n \"\"\"Read one item from the link.\"\"\"\n mpItem = []\n checkedItem = []\n url = 'http://omegapepega.com/' + a + '/' + b + '/' + c\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n results = soup.find(text=True)\n splittedText = results.rsplit('\\n')\n for line in splittedText:\n a = line.rstrip()\n mpItem.append(a.lstrip())\n mpItem.pop(0)\n mpItem.pop(-1)\n for i in mpItem:\n try:\n s = i.index(':')\n k = i[:s]\n if i.endswith(','):\n v = i[s + 1:-1]\n else:\n v = i[s + 1:]\n checkedItem.append(v.strip())\n except:\n continue\n return checkedItem\n",
"step-4": "import csv, requests\nfrom bs4 import BeautifulSoup\nitems = []\nmpItems = []\n\n\ndef openCsv():\n \"\"\"Open csv file.\"\"\"\n csvFile = 'BDO_app/modules/priceCheck/itemID.csv'\n return csvFile\n\n\ndef importAll():\n \"\"\"Import all the items from csv file.\"\"\"\n csvFile = openCsv()\n items = []\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n return items\n\n\ndef priceCheck(a, b, c):\n \"\"\"Read one item from the link.\"\"\"\n mpItem = []\n checkedItem = []\n url = 'http://omegapepega.com/' + a + '/' + b + '/' + c\n page = requests.get(url)\n soup = BeautifulSoup(page.content, 'html.parser')\n results = soup.find(text=True)\n splittedText = results.rsplit('\\n')\n for line in splittedText:\n a = line.rstrip()\n mpItem.append(a.lstrip())\n mpItem.pop(0)\n mpItem.pop(-1)\n for i in mpItem:\n try:\n s = i.index(':')\n k = i[:s]\n if i.endswith(','):\n v = i[s + 1:-1]\n else:\n v = i[s + 1:]\n checkedItem.append(v.strip())\n except:\n continue\n return checkedItem\n",
"step-5": "import csv, requests\nfrom bs4 import BeautifulSoup\n\n\nitems = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,\n # name, pricePerOne, subCategory, subKey, totalTradeCount,\n # mainLabel, subLabel, description\nmpItems = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,\n # name, pricePerOne, subCategory, subKey, totalTradeCount\n\n\ndef openCsv():\n \"\"\"Open csv file.\"\"\"\n csvFile = 'BDO_app/modules/priceCheck/itemID.csv'\n return csvFile\n\n\ndef importAll():\n \"\"\"Import all the items from csv file.\"\"\"\n csvFile = openCsv()\n items = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,\n # name, pricePerOne, subCategory, subKey, totalTradeCount,\n # mainLabel, subLabel, description\n\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n\n return items\n\n\ndef priceCheck(a, b, c):\n \"\"\"Read one item from the link.\"\"\"\n mpItem = []\n checkedItem = []\n\n url = 'http://omegapepega.com/' + a + '/' + b + '/' + c\n # url = http://omegapepega.com/region/mainKey/subKey\n page = requests.get(url)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n\n results = soup.find(text=True)\n splittedText = results.rsplit('\\n')\n\n for line in splittedText:\n a = line.rstrip()\n mpItem.append(a.lstrip())\n\n mpItem.pop(0)\n mpItem.pop(-1)\n\n for i in mpItem:\n try:\n s = i.index(':')\n k = (i[:s])\n if i.endswith(','):\n v = (i[s+1:-1])\n else: v = (i[s+1:])\n checkedItem.append(v.strip())\n except:\n continue\n\n return checkedItem\n",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
"""Project agnostic helper functions that could be migrated to and external lib.
"""
|
normal
|
{
"blob_id": "f15bb4ab93ecb2689bf74687852e60dfa98caea9",
"index": 7374,
"step-1": "<mask token>\n",
"step-2": "\"\"\"Project agnostic helper functions that could be migrated to and external lib.\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
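# Column names for per-player raid-log stats ("phases.All.*" metrics), followed by per-boss mechanic column lists.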
columns = ['account', 'name', 'Death', 'archetype', 'profession', 'elite',
'phases.All.actual_boss.dps', 'phases.All.actual.dps',
'phases.All.actual_boss.flanking', 'phases.All.actual_boss.scholar',
'phases.All.actual_boss.condi_dps', 'phases.All.actual_boss.power_dps',
'phases.All.buffs.aegis', 'phases.All.buffs.alacrity',
'phases.All.buffs.assassins_presence',
'phases.All.buffs.banner_defence', 'phases.All.buffs.banner_discipline',
'phases.All.buffs.banner_strength', 'phases.All.buffs.banner_tactics',
'phases.All.buffs.empower_allies', 'phases.All.buffs.fury',
'phases.All.buffs.glyph_of_empowerment',
'phases.All.buffs.lead_attacks', 'phases.All.buffs.lotus_training',
'phases.All.buffs.might', 'phases.All.buffs.naturalistic_resonance',
'phases.All.buffs.pinpoint_distribution', 'phases.All.buffs.protection',
'phases.All.buffs.quickness', 'phases.All.buffs.regen',
'phases.All.buffs.resist', 'phases.All.buffs.retaliation',
'phases.All.buffs.soothing_mist', 'phases.All.buffs.spirit_of_frost',
'phases.All.buffs.spotter', 'phases.All.buffs.stab',
'phases.All.buffs.stone_spirit', 'phases.All.buffs.storm_spirit',
'phases.All.buffs.sun_spirit', 'phases.All.buffs.swift',
'phases.All.buffs.vampiric_presence', 'phases.All.buffs.vigor',
'phases.All.buffs_out.aegis', 'phases.All.buffs_out.alacrity',
'phases.All.buffs_out.assassins_presence',
'phases.All.buffs_out.banner_defence',
'phases.All.buffs_out.banner_discipline',
'phases.All.buffs_out.banner_strength',
'phases.All.buffs_out.banner_tactics',
'phases.All.buffs_out.empower_allies', 'phases.All.buffs_out.fury',
'phases.All.buffs_out.glyph_of_empowerment',
'phases.All.buffs_out.lead_attacks',
'phases.All.buffs_out.lotus_training', 'phases.All.buffs_out.might',
'phases.All.buffs_out.naturalistic_resonance',
'phases.All.buffs_out.pinpoint_distribution',
'phases.All.buffs_out.protection', 'phases.All.buffs_out.quickness',
'phases.All.buffs_out.regen', 'phases.All.buffs_out.resist',
'phases.All.buffs_out.retaliation',
'phases.All.buffs_out.soothing_mist',
'phases.All.buffs_out.spirit_of_frost', 'phases.All.buffs_out.spotter',
'phases.All.buffs_out.stab', 'phases.All.buffs_out.stone_spirit',
'phases.All.buffs_out.storm_spirit', 'phases.All.buffs_out.sun_spirit',
'phases.All.buffs_out.swift', 'phases.All.buffs_out.vampiric_presence',
'phases.All.buffs_out.vigor', 'phases.All.events.dead_time',
'phases.All.events.deaths', 'phases.All.events.disconnect_time',
'phases.All.events.disconnects', 'phases.All.events.down_time',
'phases.All.events.downs', 'phases.All.received.dps',
'phases.All.shielded.dps']
old_columns = ['account', 'name', 'Death', 'archetype', 'profession',
'elite', 'phases.All.actual_boss.dps', 'phases.All.actual.dps',
'phases.All.actual_boss.flanking', 'phases.All.actual_boss.scholar',
'phases.All.actual_boss.condi_dps', 'phases.All.actual_boss.power_dps',
'phases.All.buffs.alacrity', 'phases.All.buffs.assassins_presence',
'phases.All.buffs.banner_defence', 'phases.All.buffs.banner_discipline',
'phases.All.buffs.banner_strength', 'phases.All.buffs.banner_tactics',
'phases.All.buffs.empower_allies', 'phases.All.buffs.fury',
'phases.All.buffs.glyph_of_empowerment', 'phases.All.buffs.gotl',
'phases.All.buffs.lead_attacks', 'phases.All.buffs.lotus_training',
'phases.All.buffs.might', 'phases.All.buffs.naturalistic_resonance',
'phases.All.buffs.pinpoint_distribution', 'phases.All.buffs.protection',
'phases.All.buffs.quickness', 'phases.All.buffs.soothing_mist',
'phases.All.buffs.spirit_of_frost', 'phases.All.buffs.spotter',
'phases.All.buffs.stone_spirit', 'phases.All.buffs.storm_spirit',
'phases.All.buffs.sun_spirit', 'phases.All.buffs.vampiric_presence',
'phases.All.buffs_out.alacrity',
'phases.All.buffs_out.assassins_presence',
'phases.All.buffs_out.banner_defence',
'phases.All.buffs_out.banner_discipline',
'phases.All.buffs_out.banner_strength',
'phases.All.buffs_out.banner_tactics',
'phases.All.buffs_out.empower_allies', 'phases.All.buffs_out.fury',
'phases.All.buffs_out.glyph_of_empowerment',
'phases.All.buffs_out.gotl', 'phases.All.buffs_out.lead_attacks',
'phases.All.buffs_out.lotus_training', 'phases.All.buffs_out.might',
'phases.All.buffs_out.naturalistic_resonance',
'phases.All.buffs_out.pinpoint_distribution',
'phases.All.buffs_out.protection', 'phases.All.buffs_out.quickness',
'phases.All.buffs_out.retaliation',
'phases.All.buffs_out.soothing_mist',
'phases.All.buffs_out.spirit_of_frost', 'phases.All.buffs_out.spotter',
'phases.All.buffs_out.stone_spirit',
'phases.All.buffs_out.storm_spirit', 'phases.All.buffs_out.sun_spirit',
'phases.All.buffs_out.vampiric_presence', 'phases.All.events.dead_time',
'phases.All.events.deaths', 'phases.All.events.disconnect_time',
'phases.All.events.disconnects', 'phases.All.events.down_time',
'phases.All.events.downs', 'phases.All.received.dps',
'phases.All.shielded.dps']
vg_mechanics = ['phases.All.mechanics.Bullets Eaten',
'phases.All.mechanics.Teleports']
gors_mechanics = ['phases.All.mechanics.Ghastly Imprisonments',
'phases.All.mechanics.Spectral Darkness',
'phases.All.mechanics.Unmitigated Spectral Impacts']
sab_mechanics = []
sloth_mechanics = ['phases.All.mechanics.Spores Blocked',
'phases.All.mechanics.Spores Received',
'phases.All.mechanics.Tantrum Knockdowns',
'phases.All.mechanics.Toxic Cloud Breathed',
'phases.All.mechanics.Volatile Poison Carrier']
matt_mechanics = ['phases.All.mechanics.Burning Stacks Received',
'phases.All.mechanics.Corrupted',
'phases.All.mechanics.Moved While Unbalanced',
'phases.All.mechanics.Sacrificed',
'phases.All.mechanics.Shards Absorbed',
'phases.All.mechanics.Surrender',
'phases.All.mechanics.Well of the Profane Carrier']
kc_mechanics = ['phases.All.mechanics.Correct Orb',
'phases.All.mechanics.Wrong Orb']
xera_mechanics = ['phases.All.mechanics.Derangement']
cairn_mechanics = ['phases.All.mechanics.Displacement',
'phases.All.mechanics.Meteor Swarm',
'phases.All.mechanics.Shared Agony',
'phases.All.mechanics.Spatial Manipulation']
mo_mechanics = ['phases.All.mechanics.Claim', 'phases.All.mechanics.Dispel',
'phases.All.mechanics.Enemy Tile', 'phases.All.mechanics.Protect',
"phases.All.mechanics.Soldier's Aura"]
sam_mechanics = ['phases.All.mechanics.Anguished Bolt',
'phases.All.mechanics.Big Friend', 'phases.All.mechanics.Bludgeon',
'phases.All.mechanics.Charge', 'phases.All.mechanics.Claw',
'phases.All.mechanics.Fixate',
'phases.All.mechanics.Inevitable Betrayl',
'phases.All.mechanics.Prisoner Sweep', 'phases.All.mechanics.Shockwave',
'phases.All.mechanics.Small Friend', 'phases.All.mechanics.Spear Impact']
deimos_mechanics = ['phases.All.mechanics.Annihilate',
'phases.All.mechanics.Demonic Shockwave',
'phases.All.mechanics.Mind Crush', 'phases.All.mechanics.Rapid Decay',
'phases.All.mechanics.Soul Feast', 'phases.All.mechanics.Tear Consumed',
'phases.All.mechanics.Teleports']
sh_mechanics = ['phases.All.mechanics.Inner Vortex',
'phases.All.mechanics.Necrosis Received',
'phases.All.mechanics.Outer Vortex', 'phases.All.mechanics.Quad Slash',
'phases.All.mechanics.Scythe Hits', 'phases.All.mechanics.Soul Rift']
dhuum_mechanics = ['phases.All.mechanics.Death Marked',
'phases.All.mechanics.Dhuum Gaze', 'phases.All.mechanics.Fissured',
'phases.All.mechanics.Messenger', 'phases.All.mechanics.Putrid Bomb',
'phases.All.mechanics.Shackle Hits', 'phases.All.mechanics.Snatched',
'phases.All.mechanics.Sucked']
|
normal
|
{
"blob_id": "fa948838b5c2d688fe8c748166f23ffc8e677f93",
"index": 9265,
"step-1": "<mask token>\n",
"step-2": "columns = ['account', 'name', 'Death', 'archetype', 'profession', 'elite',\n 'phases.All.actual_boss.dps', 'phases.All.actual.dps',\n 'phases.All.actual_boss.flanking', 'phases.All.actual_boss.scholar',\n 'phases.All.actual_boss.condi_dps', 'phases.All.actual_boss.power_dps',\n 'phases.All.buffs.aegis', 'phases.All.buffs.alacrity',\n 'phases.All.buffs.assassins_presence',\n 'phases.All.buffs.banner_defence', 'phases.All.buffs.banner_discipline',\n 'phases.All.buffs.banner_strength', 'phases.All.buffs.banner_tactics',\n 'phases.All.buffs.empower_allies', 'phases.All.buffs.fury',\n 'phases.All.buffs.glyph_of_empowerment',\n 'phases.All.buffs.lead_attacks', 'phases.All.buffs.lotus_training',\n 'phases.All.buffs.might', 'phases.All.buffs.naturalistic_resonance',\n 'phases.All.buffs.pinpoint_distribution', 'phases.All.buffs.protection',\n 'phases.All.buffs.quickness', 'phases.All.buffs.regen',\n 'phases.All.buffs.resist', 'phases.All.buffs.retaliation',\n 'phases.All.buffs.soothing_mist', 'phases.All.buffs.spirit_of_frost',\n 'phases.All.buffs.spotter', 'phases.All.buffs.stab',\n 'phases.All.buffs.stone_spirit', 'phases.All.buffs.storm_spirit',\n 'phases.All.buffs.sun_spirit', 'phases.All.buffs.swift',\n 'phases.All.buffs.vampiric_presence', 'phases.All.buffs.vigor',\n 'phases.All.buffs_out.aegis', 'phases.All.buffs_out.alacrity',\n 'phases.All.buffs_out.assassins_presence',\n 'phases.All.buffs_out.banner_defence',\n 'phases.All.buffs_out.banner_discipline',\n 'phases.All.buffs_out.banner_strength',\n 'phases.All.buffs_out.banner_tactics',\n 'phases.All.buffs_out.empower_allies', 'phases.All.buffs_out.fury',\n 'phases.All.buffs_out.glyph_of_empowerment',\n 'phases.All.buffs_out.lead_attacks',\n 'phases.All.buffs_out.lotus_training', 'phases.All.buffs_out.might',\n 'phases.All.buffs_out.naturalistic_resonance',\n 'phases.All.buffs_out.pinpoint_distribution',\n 'phases.All.buffs_out.protection', 'phases.All.buffs_out.quickness',\n 'phases.All.buffs_out.regen', 'phases.All.buffs_out.resist',\n 'phases.All.buffs_out.retaliation',\n 'phases.All.buffs_out.soothing_mist',\n 'phases.All.buffs_out.spirit_of_frost', 'phases.All.buffs_out.spotter',\n 'phases.All.buffs_out.stab', 'phases.All.buffs_out.stone_spirit',\n 'phases.All.buffs_out.storm_spirit', 'phases.All.buffs_out.sun_spirit',\n 'phases.All.buffs_out.swift', 'phases.All.buffs_out.vampiric_presence',\n 'phases.All.buffs_out.vigor', 'phases.All.events.dead_time',\n 'phases.All.events.deaths', 'phases.All.events.disconnect_time',\n 'phases.All.events.disconnects', 'phases.All.events.down_time',\n 'phases.All.events.downs', 'phases.All.received.dps',\n 'phases.All.shielded.dps']\nold_columns = ['account', 'name', 'Death', 'archetype', 'profession',\n 'elite', 'phases.All.actual_boss.dps', 'phases.All.actual.dps',\n 'phases.All.actual_boss.flanking', 'phases.All.actual_boss.scholar',\n 'phases.All.actual_boss.condi_dps', 'phases.All.actual_boss.power_dps',\n 'phases.All.buffs.alacrity', 'phases.All.buffs.assassins_presence',\n 'phases.All.buffs.banner_defence', 'phases.All.buffs.banner_discipline',\n 'phases.All.buffs.banner_strength', 'phases.All.buffs.banner_tactics',\n 'phases.All.buffs.empower_allies', 'phases.All.buffs.fury',\n 'phases.All.buffs.glyph_of_empowerment', 'phases.All.buffs.gotl',\n 'phases.All.buffs.lead_attacks', 'phases.All.buffs.lotus_training',\n 'phases.All.buffs.might', 'phases.All.buffs.naturalistic_resonance',\n 'phases.All.buffs.pinpoint_distribution', 'phases.All.buffs.protection',\n 
'phases.All.buffs.quickness', 'phases.All.buffs.soothing_mist',\n 'phases.All.buffs.spirit_of_frost', 'phases.All.buffs.spotter',\n 'phases.All.buffs.stone_spirit', 'phases.All.buffs.storm_spirit',\n 'phases.All.buffs.sun_spirit', 'phases.All.buffs.vampiric_presence',\n 'phases.All.buffs_out.alacrity',\n 'phases.All.buffs_out.assassins_presence',\n 'phases.All.buffs_out.banner_defence',\n 'phases.All.buffs_out.banner_discipline',\n 'phases.All.buffs_out.banner_strength',\n 'phases.All.buffs_out.banner_tactics',\n 'phases.All.buffs_out.empower_allies', 'phases.All.buffs_out.fury',\n 'phases.All.buffs_out.glyph_of_empowerment',\n 'phases.All.buffs_out.gotl', 'phases.All.buffs_out.lead_attacks',\n 'phases.All.buffs_out.lotus_training', 'phases.All.buffs_out.might',\n 'phases.All.buffs_out.naturalistic_resonance',\n 'phases.All.buffs_out.pinpoint_distribution',\n 'phases.All.buffs_out.protection', 'phases.All.buffs_out.quickness',\n 'phases.All.buffs_out.retaliation',\n 'phases.All.buffs_out.soothing_mist',\n 'phases.All.buffs_out.spirit_of_frost', 'phases.All.buffs_out.spotter',\n 'phases.All.buffs_out.stone_spirit',\n 'phases.All.buffs_out.storm_spirit', 'phases.All.buffs_out.sun_spirit',\n 'phases.All.buffs_out.vampiric_presence', 'phases.All.events.dead_time',\n 'phases.All.events.deaths', 'phases.All.events.disconnect_time',\n 'phases.All.events.disconnects', 'phases.All.events.down_time',\n 'phases.All.events.downs', 'phases.All.received.dps',\n 'phases.All.shielded.dps']\nvg_mechanics = ['phases.All.mechanics.Bullets Eaten',\n 'phases.All.mechanics.Teleports']\ngors_mechanics = ['phases.All.mechanics.Ghastly Imprisonments',\n 'phases.All.mechanics.Spectral Darkness',\n 'phases.All.mechanics.Unmitigated Spectral Impacts']\nsab_mechanics = []\nsloth_mechanics = ['phases.All.mechanics.Spores Blocked',\n 'phases.All.mechanics.Spores Received',\n 'phases.All.mechanics.Tantrum Knockdowns',\n 'phases.All.mechanics.Toxic Cloud Breathed',\n 'phases.All.mechanics.Volatile Poison Carrier']\nmatt_mechanics = ['phases.All.mechanics.Burning Stacks Received',\n 'phases.All.mechanics.Corrupted',\n 'phases.All.mechanics.Moved While Unbalanced',\n 'phases.All.mechanics.Sacrificed',\n 'phases.All.mechanics.Shards Absorbed',\n 'phases.All.mechanics.Surrender',\n 'phases.All.mechanics.Well of the Profane Carrier']\nkc_mechanics = ['phases.All.mechanics.Correct Orb',\n 'phases.All.mechanics.Wrong Orb']\nxera_mechanics = ['phases.All.mechanics.Derangement']\ncairn_mechanics = ['phases.All.mechanics.Displacement',\n 'phases.All.mechanics.Meteor Swarm',\n 'phases.All.mechanics.Shared Agony',\n 'phases.All.mechanics.Spatial Manipulation']\nmo_mechanics = ['phases.All.mechanics.Claim', 'phases.All.mechanics.Dispel',\n 'phases.All.mechanics.Enemy Tile', 'phases.All.mechanics.Protect',\n \"phases.All.mechanics.Soldier's Aura\"]\nsam_mechanics = ['phases.All.mechanics.Anguished Bolt',\n 'phases.All.mechanics.Big Friend', 'phases.All.mechanics.Bludgeon',\n 'phases.All.mechanics.Charge', 'phases.All.mechanics.Claw',\n 'phases.All.mechanics.Fixate',\n 'phases.All.mechanics.Inevitable Betrayl',\n 'phases.All.mechanics.Prisoner Sweep', 'phases.All.mechanics.Shockwave',\n 'phases.All.mechanics.Small Friend', 'phases.All.mechanics.Spear Impact']\ndeimos_mechanics = ['phases.All.mechanics.Annihilate',\n 'phases.All.mechanics.Demonic Shockwave',\n 'phases.All.mechanics.Mind Crush', 'phases.All.mechanics.Rapid Decay',\n 'phases.All.mechanics.Soul Feast', 'phases.All.mechanics.Tear Consumed',\n 
'phases.All.mechanics.Teleports']\nsh_mechanics = ['phases.All.mechanics.Inner Vortex',\n 'phases.All.mechanics.Necrosis Received',\n 'phases.All.mechanics.Outer Vortex', 'phases.All.mechanics.Quad Slash',\n 'phases.All.mechanics.Scythe Hits', 'phases.All.mechanics.Soul Rift']\ndhuum_mechanics = ['phases.All.mechanics.Death Marked',\n 'phases.All.mechanics.Dhuum Gaze', 'phases.All.mechanics.Fissured',\n 'phases.All.mechanics.Messenger', 'phases.All.mechanics.Putrid Bomb',\n 'phases.All.mechanics.Shackle Hits', 'phases.All.mechanics.Snatched',\n 'phases.All.mechanics.Sucked']\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.forms import ModelForm, ChoiceField, Form, FileField, ModelChoiceField, HiddenInput, ValidationError
from market.models import *
class OrderForm(ModelForm):
"""Order form used in trader view."""
# from http://stackoverflow.com/questions/1697702/how-to-pass-initial-parameter-to-djangos-modelform-instance/1697770#1697770
# price from http://stackoverflow.com/questions/6473895/how-to-restrict-values-in-a-django-decimalfield
    # restricts prices to 0.01 through 2.00
PRICE_CHOICES = [(i*.01, str(i*.01)) for i in range(1,201)]
price = ChoiceField(choices=PRICE_CHOICES)
trader = ModelChoiceField(label='', queryset=Trader.objects.all(), widget=HiddenInput())
market = ModelChoiceField(label='', queryset=Market.objects.all(), widget=HiddenInput())
def clean(self):
"""Validates the data. Ensures the trader has enough cash or shares
to complete the requested order."""
cleaned_data = self.cleaned_data
if cleaned_data.get('order') and cleaned_data.get('stock') \
and cleaned_data.get('volume') and cleaned_data.get('price'):
t = cleaned_data['trader']
if cleaned_data['order'] == 'B': # buy order
open_orders = Order.objects.filter(trader=t,
order='B', completed=False)
open_order_value = float(sum([o.volume * o.price for o in open_orders]))
open_order_value += int(cleaned_data['volume']) * float(cleaned_data['price'])
if open_order_value > t.cash:
raise ValidationError("You don't have enough cash!")
elif cleaned_data['order'] == 'S': # sell order!
open_orders = sum(Order.objects.filter(trader=t, order='S',
stock=cleaned_data['stock'],
completed=False).values_list('volume', flat=True))
open_orders += cleaned_data['volume']
if open_orders > t.holding_set.get(stock=cleaned_data['stock']).shares:
raise ValidationError("You don't have enough shares!")
return cleaned_data
class Meta:
model = Order
fields = ('stock', 'order', 'volume', 'price', 'trader', 'market')
class UploadFileForm(Form):
file = FileField()
|
normal
|
{
"blob_id": "044e3479c32357e22ca3165d8601d8bd2a439fcb",
"index": 2329,
"step-1": "<mask token>\n\n\nclass OrderForm(ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Order\n fields = 'stock', 'order', 'volume', 'price', 'trader', 'market'\n\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-2": "<mask token>\n\n\nclass OrderForm(ModelForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def clean(self):\n \"\"\"Validates the data. Ensures the trader has enough cash or shares\n to complete the requested order.\"\"\"\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock'\n ) and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B':\n open_orders = Order.objects.filter(trader=t, order='B',\n completed=False)\n open_order_value = float(sum([(o.volume * o.price) for o in\n open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(\n cleaned_data['price'])\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n elif cleaned_data['order'] == 'S':\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'], completed=False).\n values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']\n ).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data\n\n\n class Meta:\n model = Order\n fields = 'stock', 'order', 'volume', 'price', 'trader', 'market'\n\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-3": "<mask token>\n\n\nclass OrderForm(ModelForm):\n <mask token>\n PRICE_CHOICES = [(i * 0.01, str(i * 0.01)) for i in range(1, 201)]\n price = ChoiceField(choices=PRICE_CHOICES)\n trader = ModelChoiceField(label='', queryset=Trader.objects.all(),\n widget=HiddenInput())\n market = ModelChoiceField(label='', queryset=Market.objects.all(),\n widget=HiddenInput())\n\n def clean(self):\n \"\"\"Validates the data. Ensures the trader has enough cash or shares\n to complete the requested order.\"\"\"\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock'\n ) and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B':\n open_orders = Order.objects.filter(trader=t, order='B',\n completed=False)\n open_order_value = float(sum([(o.volume * o.price) for o in\n open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(\n cleaned_data['price'])\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n elif cleaned_data['order'] == 'S':\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'], completed=False).\n values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']\n ).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data\n\n\n class Meta:\n model = Order\n fields = 'stock', 'order', 'volume', 'price', 'trader', 'market'\n\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-4": "<mask token>\n\n\nclass OrderForm(ModelForm):\n \"\"\"Order form used in trader view.\"\"\"\n PRICE_CHOICES = [(i * 0.01, str(i * 0.01)) for i in range(1, 201)]\n price = ChoiceField(choices=PRICE_CHOICES)\n trader = ModelChoiceField(label='', queryset=Trader.objects.all(),\n widget=HiddenInput())\n market = ModelChoiceField(label='', queryset=Market.objects.all(),\n widget=HiddenInput())\n\n def clean(self):\n \"\"\"Validates the data. Ensures the trader has enough cash or shares\n to complete the requested order.\"\"\"\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock'\n ) and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B':\n open_orders = Order.objects.filter(trader=t, order='B',\n completed=False)\n open_order_value = float(sum([(o.volume * o.price) for o in\n open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(\n cleaned_data['price'])\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n elif cleaned_data['order'] == 'S':\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'], completed=False).\n values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']\n ).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data\n\n\n class Meta:\n model = Order\n fields = 'stock', 'order', 'volume', 'price', 'trader', 'market'\n\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-5": "from django.forms import ModelForm, ChoiceField, Form, FileField, ModelChoiceField, HiddenInput, ValidationError\nfrom market.models import *\n\nclass OrderForm(ModelForm):\n \"\"\"Order form used in trader view.\"\"\"\n # from http://stackoverflow.com/questions/1697702/how-to-pass-initial-parameter-to-djangos-modelform-instance/1697770#1697770\n # price from http://stackoverflow.com/questions/6473895/how-to-restrict-values-in-a-django-decimalfield\n\n # restricts prices to 0.0 through 2.0\n PRICE_CHOICES = [(i*.01, str(i*.01)) for i in range(1,201)]\n price = ChoiceField(choices=PRICE_CHOICES)\n trader = ModelChoiceField(label='', queryset=Trader.objects.all(), widget=HiddenInput())\n market = ModelChoiceField(label='', queryset=Market.objects.all(), widget=HiddenInput())\n\n def clean(self):\n \"\"\"Validates the data. Ensures the trader has enough cash or shares\n to complete the requested order.\"\"\"\n\n cleaned_data = self.cleaned_data\n if cleaned_data.get('order') and cleaned_data.get('stock') \\\n and cleaned_data.get('volume') and cleaned_data.get('price'):\n t = cleaned_data['trader']\n if cleaned_data['order'] == 'B': # buy order\n open_orders = Order.objects.filter(trader=t,\n order='B', completed=False)\n open_order_value = float(sum([o.volume * o.price for o in open_orders]))\n open_order_value += int(cleaned_data['volume']) * float(cleaned_data['price'])\n\n if open_order_value > t.cash:\n raise ValidationError(\"You don't have enough cash!\")\n\n elif cleaned_data['order'] == 'S': # sell order!\n open_orders = sum(Order.objects.filter(trader=t, order='S',\n stock=cleaned_data['stock'],\n completed=False).values_list('volume', flat=True))\n open_orders += cleaned_data['volume']\n\n if open_orders > t.holding_set.get(stock=cleaned_data['stock']).shares:\n raise ValidationError(\"You don't have enough shares!\")\n return cleaned_data\n\n class Meta:\n model = Order\n fields = ('stock', 'order', 'volume', 'price', 'trader', 'market')\n\nclass UploadFileForm(Form):\n file = FileField()\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
from simulating_blobs_of_fluid.simulation import Simulation
from simulating_blobs_of_fluid.fluid_renderer import FluidRenderer
import arcade
def main():
simulation = Simulation(particle_count=50, dt=0.016, box_width=250)
FluidRenderer(simulation.box_width, 800, simulation)
arcade.run()
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "83733e707a1be131335c4980cdf4beed365eb530",
"index": 6011,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n simulation = Simulation(particle_count=50, dt=0.016, box_width=250)\n FluidRenderer(simulation.box_width, 800, simulation)\n arcade.run()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n simulation = Simulation(particle_count=50, dt=0.016, box_width=250)\n FluidRenderer(simulation.box_width, 800, simulation)\n arcade.run()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from simulating_blobs_of_fluid.simulation import Simulation\nfrom simulating_blobs_of_fluid.fluid_renderer import FluidRenderer\nimport arcade\n\n\ndef main():\n simulation = Simulation(particle_count=50, dt=0.016, box_width=250)\n FluidRenderer(simulation.box_width, 800, simulation)\n arcade.run()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from simulating_blobs_of_fluid.simulation import Simulation\nfrom simulating_blobs_of_fluid.fluid_renderer import FluidRenderer\n\nimport arcade\n\n\ndef main():\n simulation = Simulation(particle_count=50, dt=0.016, box_width=250)\n FluidRenderer(simulation.box_width, 800, simulation)\n\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import pandas as pd
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
name="/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_sig_wc.csv"
name_bkg="/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_bkg_wc.csv"
drop_cols=[0,1,2,15]
names = [i for i in range(16)]
#columns=[] #list of columns we want to take
file_df_sig=pd.read_csv(name, sep=",",names=names)
tmp_df_sig = file_df_sig.drop(drop_cols, axis=1)
file_df_bkg = pd.read_csv(name_bkg, sep=",",names=names)
tmp_df_bkg = file_df_bkg.drop(drop_cols, axis=1)
tmp_df = pd.concat([tmp_df_sig , tmp_df_bkg] , ignore_index=True)
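# Signal and background are combined so that PCA is fitted on (and applied to) the full training set.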
#fig , ax = plt.subplots()
#tmp_df.hist(bins=10,ax=ax)
#fig.savefig("before_pca.pdf")
pca=PCA(n_components=len(tmp_df.columns)).fit_transform(tmp_df)
pca_df = pd.DataFrame(data=pca, columns=tmp_df.columns)
#fig , ax = plt.subplots()
#df.hist(bins=10,ax=ax)
#fig.savefig("after_pca.pdf")
final_df= pd.concat([file_df_sig , file_df_bkg] , ignore_index=True)
print("Before PCA" , final_df)
for i in pca_df.columns:
final_df[i]=pca_df[i]
print("After PCA" , final_df)
cut=len(file_df_sig.index)
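# Split back into signal (first cut rows) and background and write each to its own csv.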
final_df.iloc[:cut].to_csv("pca_stop_train_sig_wc.csv",header= False,index=False)
final_df.iloc[cut:].to_csv("pca_stop_train_bkg_wc.csv",header= False , index =False)
|
normal
|
{
"blob_id": "f8bb2851192a53e94e503c0c63b17477878ad9a7",
"index": 6926,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Before PCA', final_df)\nfor i in pca_df.columns:\n final_df[i] = pca_df[i]\nprint('After PCA', final_df)\n<mask token>\nfinal_df.iloc[:cut].to_csv('pca_stop_train_sig_wc.csv', header=False, index\n =False)\nfinal_df.iloc[cut:].to_csv('pca_stop_train_bkg_wc.csv', header=False, index\n =False)\n",
"step-3": "<mask token>\nname = '/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_sig_wc.csv'\nname_bkg = '/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_bkg_wc.csv'\ndrop_cols = [0, 1, 2, 15]\nnames = [i for i in range(16)]\nfile_df_sig = pd.read_csv(name, sep=',', names=names)\ntmp_df_sig = file_df_sig.drop(drop_cols, axis=1)\nfile_df_bkg = pd.read_csv(name_bkg, sep=',', names=names)\ntmp_df_bkg = file_df_bkg.drop(drop_cols, axis=1)\ntmp_df = pd.concat([tmp_df_sig, tmp_df_bkg], ignore_index=True)\npca = PCA(n_components=len(tmp_df.columns)).fit_transform(tmp_df)\npca_df = pd.DataFrame(data=pca, columns=tmp_df.columns)\nfinal_df = pd.concat([file_df_sig, file_df_bkg], ignore_index=True)\nprint('Before PCA', final_df)\nfor i in pca_df.columns:\n final_df[i] = pca_df[i]\nprint('After PCA', final_df)\ncut = len(file_df_sig.index)\nfinal_df.iloc[:cut].to_csv('pca_stop_train_sig_wc.csv', header=False, index\n =False)\nfinal_df.iloc[cut:].to_csv('pca_stop_train_bkg_wc.csv', header=False, index\n =False)\n",
"step-4": "import os\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nname = '/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_sig_wc.csv'\nname_bkg = '/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_bkg_wc.csv'\ndrop_cols = [0, 1, 2, 15]\nnames = [i for i in range(16)]\nfile_df_sig = pd.read_csv(name, sep=',', names=names)\ntmp_df_sig = file_df_sig.drop(drop_cols, axis=1)\nfile_df_bkg = pd.read_csv(name_bkg, sep=',', names=names)\ntmp_df_bkg = file_df_bkg.drop(drop_cols, axis=1)\ntmp_df = pd.concat([tmp_df_sig, tmp_df_bkg], ignore_index=True)\npca = PCA(n_components=len(tmp_df.columns)).fit_transform(tmp_df)\npca_df = pd.DataFrame(data=pca, columns=tmp_df.columns)\nfinal_df = pd.concat([file_df_sig, file_df_bkg], ignore_index=True)\nprint('Before PCA', final_df)\nfor i in pca_df.columns:\n final_df[i] = pca_df[i]\nprint('After PCA', final_df)\ncut = len(file_df_sig.index)\nfinal_df.iloc[:cut].to_csv('pca_stop_train_sig_wc.csv', header=False, index\n =False)\nfinal_df.iloc[cut:].to_csv('pca_stop_train_bkg_wc.csv', header=False, index\n =False)\n",
"step-5": "import os\r\nimport pandas as pd\r\nfrom sklearn.decomposition import PCA\r\nimport matplotlib.pyplot as plt \r\n\r\nname=\"/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_sig_wc.csv\"\r\nname_bkg=\"/home/t3cms/thessel/Workflow1.5/stop_data/stop_train_bkg_wc.csv\"\r\ndrop_cols=[0,1,2,15]\r\nnames = [i for i in range(16)]\r\n#columns=[] #list of columns we want to take\r\nfile_df_sig=pd.read_csv(name, sep=\",\",names=names)\r\ntmp_df_sig = file_df_sig.drop(drop_cols, axis=1)\r\n\r\nfile_df_bkg = pd.read_csv(name_bkg, sep=\",\",names=names)\r\ntmp_df_bkg = file_df_bkg.drop(drop_cols, axis=1)\r\n\r\ntmp_df = pd.concat([tmp_df_sig , tmp_df_bkg] , ignore_index=True)\r\n\r\n\r\n\r\n#fig , ax = plt.subplots()\r\n#tmp_df.hist(bins=10,ax=ax)\r\n#fig.savefig(\"before_pca.pdf\")\r\n\r\npca=PCA(n_components=len(tmp_df.columns)).fit_transform(tmp_df)\r\n\r\n\r\n\r\npca_df = pd.DataFrame(data=pca, columns=tmp_df.columns)\r\n\r\n#fig , ax = plt.subplots()\r\n#df.hist(bins=10,ax=ax)\r\n#fig.savefig(\"after_pca.pdf\")\r\n\r\nfinal_df= pd.concat([file_df_sig , file_df_bkg] , ignore_index=True)\r\n\r\nprint(\"Before PCA\" , final_df)\r\n\r\nfor i in pca_df.columns :\r\n\tfinal_df[i]=pca_df[i]\r\n\t\r\nprint(\"After PCA\" , final_df)\r\n\r\ncut=len(file_df_sig.index)\r\n\r\n\r\nfinal_df.iloc[:cut].to_csv(\"pca_stop_train_sig_wc.csv\",header= False,index=False)\r\nfinal_df.iloc[cut:].to_csv(\"pca_stop_train_bkg_wc.csv\",header= False , index =False)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2020- Spyder Project Contributors
#
# Released under the terms of the MIT License
# ----------------------------------------------------------------------------
"""Tests for the execution of pylint."""
# Standard library imports
from io import open
import os.path as osp
from unittest.mock import Mock, MagicMock
# Third party imports
import pytest
from qtpy.QtCore import Signal, QObject
# Local imports
from spyder.plugins.pylint.plugin import Pylint
from spyder.plugins.pylint.widgets.pylintgui import PylintWidget
from spyder.plugins.pylint.utils import get_pylintrc_path
# pylint: disable=redefined-outer-name
PYLINTRC_FILENAME = ".pylintrc"
# Constants for dir name keys
# In Python 3 and Spyder 5, replace with enum
NO_DIR = "e"
SCRIPT_DIR = "SCRIPT_DIR"
WORKING_DIR = "WORKING_DIR"
PROJECT_DIR = "PROJECT_DIR"
HOME_DIR = "HOME_DIR"
ALL_DIR = "ALL_DIR"
DIR_LIST = [SCRIPT_DIR, WORKING_DIR, PROJECT_DIR, HOME_DIR]
DIR_LIST_ALL = [NO_DIR] + DIR_LIST + [ALL_DIR]
PYLINT_TEST_SCRIPT = "import math\nimport os\nimport sys\n" + "\n".join(
[dir_name + " = " + str(idx) for idx, dir_name in enumerate(DIR_LIST_ALL)])
PYLINT_TEST_SCRIPT = "\"\"\"Docstring.\"\"\"\n" + PYLINT_TEST_SCRIPT + "\n"
PYLINTRC_TEST_CONTENTS = """
[MESSAGES CONTROL]
enable=blacklisted-name
[BASIC]
bad-names={bad_names}
good-names=e
"""
class MainWindowMock(QObject):
sig_editor_focus_changed = Signal(str)
def __init__(self):
super(MainWindowMock, self).__init__(None)
self.editor = Mock()
self.editor.sig_editor_focus_changed = self.sig_editor_focus_changed
self.projects = MagicMock()
@pytest.fixture
def pylintrc_search_paths(tmp_path_factory):
"""Construct temporary .pylintrc search paths."""
search_paths = {dir_name: str(tmp_path_factory.mktemp(dir_name))
for dir_name in DIR_LIST}
return search_paths
@pytest.fixture
def pylint_test_script(pylintrc_search_paths):
"""Write a script for testing Pylint to a temporary directory."""
script_path = osp.join(
pylintrc_search_paths[SCRIPT_DIR], "test_script.py")
with open(script_path, mode="w",
encoding="utf-8", newline="\n") as script_file:
script_file.write(PYLINT_TEST_SCRIPT)
return script_path
@pytest.fixture
def pylint_test_scripts(pylintrc_search_paths):
def _pylint_test_scripts(filenames):
"""Write scripts for testing Pylint to a temporary directory."""
script_paths = []
for filename in filenames:
script_path = osp.join(
pylintrc_search_paths[SCRIPT_DIR], filename)
with open(script_path, mode="w",
encoding="utf-8", newline="\n") as script_file:
script_file.write(PYLINT_TEST_SCRIPT)
script_paths.append(script_path)
return script_paths
return _pylint_test_scripts
@pytest.fixture(
params=[
[], [SCRIPT_DIR], [WORKING_DIR], [PROJECT_DIR], [HOME_DIR],
[SCRIPT_DIR, HOME_DIR], [WORKING_DIR, PROJECT_DIR],
[SCRIPT_DIR, PROJECT_DIR], [PROJECT_DIR, HOME_DIR],
[SCRIPT_DIR, WORKING_DIR, PROJECT_DIR, HOME_DIR]],
ids=["None", "Script", "Working", "Project", "Home", "Script & Home",
"Working & Project", "Script & Working", "Project & Home", "All"])
def pylintrc_files(pylintrc_search_paths, request):
"""Store test .pylintrc files at the paths and determine the result."""
search_paths = pylintrc_search_paths
# Determine the bad names that should be reported
pylintrc_locations = request.param
bad_names = [ALL_DIR]
for search_path_name, search_path in search_paths.items():
if search_path_name in pylintrc_locations:
expected_path = osp.join(search_path, PYLINTRC_FILENAME)
bad_names += [search_path_name]
break
else:
expected_path = None
bad_names = [NO_DIR]
# Store the selected pylintrc files at the designated paths
for location in pylintrc_locations:
pylintrc_test_contents = PYLINTRC_TEST_CONTENTS.format(
bad_names=", ".join([location, ALL_DIR]))
pylintrc_path = osp.join(search_paths[location], PYLINTRC_FILENAME)
with open(pylintrc_path, mode="w",
encoding="utf-8", newline="\n") as rc_file:
rc_file.write(pylintrc_test_contents)
return search_paths, expected_path, bad_names
def test_get_pylintrc_path(pylintrc_files, mocker):
"""Test that get_pylintrc_path finds the expected one in the hiearchy."""
search_paths, expected_path, __ = pylintrc_files
mocker.patch("pylint.config.os.path.expanduser",
return_value=search_paths[HOME_DIR])
actual_path = get_pylintrc_path(
search_paths=list(search_paths.values()),
home_path=search_paths[HOME_DIR],
)
assert actual_path == expected_path
def test_pylint_widget_noproject(pylint_test_script, mocker, qtbot):
"""Test that pylint works without errors with no project open."""
main_window = MainWindowMock()
main_window.projects.get_active_project_path = mocker.MagicMock(
return_value=None)
pylint_sw = Pylint(parent=main_window)
pylint_widget = PylintWidget(parent=pylint_sw)
pylint_widget.analyze(filename=pylint_test_script)
qtbot.waitUntil(
lambda: pylint_widget.get_data(pylint_test_script)[1] is not None,
timeout=5000)
pylint_data = pylint_widget.get_data(filename=pylint_test_script)
print(pylint_data)
assert pylint_data
assert pylint_data[0] is not None
assert pylint_data[1] is not None
def test_pylint_widget_pylintrc(
pylint_test_script, pylintrc_files, mocker, qtbot):
"""Test that entire pylint widget gets results depending on pylintrc."""
search_paths, __, bad_names = pylintrc_files
mocker.patch("pylint.config.os.path.expanduser",
return_value=search_paths[HOME_DIR])
mocker.patch("spyder.plugins.pylint.widgets.pylintgui.getcwd_or_home",
return_value=search_paths[WORKING_DIR])
mocker.patch("spyder.plugins.pylint.widgets.pylintgui.osp.expanduser",
return_value=search_paths[HOME_DIR])
main_window = MainWindowMock()
main_window.projects.get_active_project_path = mocker.MagicMock(
return_value=search_paths[PROJECT_DIR])
pylint_sw = Pylint(parent=main_window)
pylint_widget = PylintWidget(parent=pylint_sw)
pylint_widget.analyze(filename=pylint_test_script)
qtbot.waitUntil(
lambda: pylint_widget.get_data(pylint_test_script)[1] is not None,
timeout=5000)
pylint_data = pylint_widget.get_data(filename=pylint_test_script)
print(pylint_data)
assert pylint_data
conventions = pylint_data[1][3]["C:"]
assert conventions
assert len(conventions) == len(bad_names)
assert all([sum([bad_name in message[2] for message in conventions]) == 1
for bad_name in bad_names])
def test_pylint_max_history_conf(pylint_test_scripts, mocker):
"""Regression test for checking max_entries configuration.
For further information see spyder-ide/spyder#12884
"""
# Create the pylint widget for code analysis
main_window = MainWindowMock()
main_window.projects.get_active_project_path = mocker.MagicMock(
return_value=None)
pylint_sw = Pylint(parent=main_window)
pylint_widget = PylintWidget(parent=pylint_sw)
pylint_widget.filecombo.clear()
script_0, script_1, script_2 = pylint_test_scripts(
["test_script_{}.py".format(n) for n in range(3)])
# Change the max_entry to 2
pylint_widget.parent.set_option('max_entries', 2)
pylint_widget.change_history_limit(2)
assert pylint_widget.parent.get_option('max_entries') == 2
# Call to set_filename
pylint_widget.set_filename(filename=script_0)
assert pylint_widget.filecombo.count() == 1
# Add to more filenames
pylint_widget.set_filename(filename=script_1)
pylint_widget.set_filename(filename=script_2)
assert pylint_widget.filecombo.count() == 2
assert 'test_script_2.py' in pylint_widget.curr_filenames[0]
assert 'test_script_1.py' in pylint_widget.curr_filenames[1]
# Change the max entry to 1
pylint_widget.parent.set_option('max_entries', 1)
pylint_widget.change_history_limit(1)
assert pylint_widget.filecombo.count() == 1
assert 'test_script_2.py' in pylint_widget.curr_filenames[0]
if __name__ == "__main__":
pytest.main([osp.basename(__file__), '-vv', '-rw'])
|
normal
|
{
"blob_id": "22792937415a8ee4cecff2a9683c435abe54bdab",
"index": 5516,
"step-1": "<mask token>\n\n\nclass MainWindowMock(QObject):\n sig_editor_focus_changed = Signal(str)\n\n def __init__(self):\n super(MainWindowMock, self).__init__(None)\n self.editor = Mock()\n self.editor.sig_editor_focus_changed = self.sig_editor_focus_changed\n self.projects = MagicMock()\n\n\n<mask token>\n\n\[email protected]\ndef pylint_test_script(pylintrc_search_paths):\n \"\"\"Write a script for testing Pylint to a temporary directory.\"\"\"\n script_path = osp.join(pylintrc_search_paths[SCRIPT_DIR], 'test_script.py')\n with open(script_path, mode='w', encoding='utf-8', newline='\\n'\n ) as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n return script_path\n\n\[email protected]\ndef pylint_test_scripts(pylintrc_search_paths):\n\n def _pylint_test_scripts(filenames):\n \"\"\"Write scripts for testing Pylint to a temporary directory.\"\"\"\n script_paths = []\n for filename in filenames:\n script_path = osp.join(pylintrc_search_paths[SCRIPT_DIR], filename)\n with open(script_path, mode='w', encoding='utf-8', newline='\\n'\n ) as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n script_paths.append(script_path)\n return script_paths\n return _pylint_test_scripts\n\n\[email protected](params=[[], [SCRIPT_DIR], [WORKING_DIR], [PROJECT_DIR], [\n HOME_DIR], [SCRIPT_DIR, HOME_DIR], [WORKING_DIR, PROJECT_DIR], [\n SCRIPT_DIR, PROJECT_DIR], [PROJECT_DIR, HOME_DIR], [SCRIPT_DIR,\n WORKING_DIR, PROJECT_DIR, HOME_DIR]], ids=['None', 'Script', 'Working',\n 'Project', 'Home', 'Script & Home', 'Working & Project',\n 'Script & Working', 'Project & Home', 'All'])\ndef pylintrc_files(pylintrc_search_paths, request):\n \"\"\"Store test .pylintrc files at the paths and determine the result.\"\"\"\n search_paths = pylintrc_search_paths\n pylintrc_locations = request.param\n bad_names = [ALL_DIR]\n for search_path_name, search_path in search_paths.items():\n if search_path_name in pylintrc_locations:\n expected_path = osp.join(search_path, PYLINTRC_FILENAME)\n bad_names += [search_path_name]\n break\n else:\n expected_path = None\n bad_names = [NO_DIR]\n for location in pylintrc_locations:\n pylintrc_test_contents = PYLINTRC_TEST_CONTENTS.format(bad_names=\n ', '.join([location, ALL_DIR]))\n pylintrc_path = osp.join(search_paths[location], PYLINTRC_FILENAME)\n with open(pylintrc_path, mode='w', encoding='utf-8', newline='\\n'\n ) as rc_file:\n rc_file.write(pylintrc_test_contents)\n return search_paths, expected_path, bad_names\n\n\ndef test_get_pylintrc_path(pylintrc_files, mocker):\n \"\"\"Test that get_pylintrc_path finds the expected one in the hiearchy.\"\"\"\n search_paths, expected_path, __ = pylintrc_files\n mocker.patch('pylint.config.os.path.expanduser', return_value=\n search_paths[HOME_DIR])\n actual_path = get_pylintrc_path(search_paths=list(search_paths.values()\n ), home_path=search_paths[HOME_DIR])\n assert actual_path == expected_path\n\n\ndef test_pylint_widget_noproject(pylint_test_script, mocker, qtbot):\n \"\"\"Test that pylint works without errors with no project open.\"\"\"\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n qtbot.waitUntil(lambda : pylint_widget.get_data(pylint_test_script)[1]\n is not None, timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n assert pylint_data[0] is 
not None\n assert pylint_data[1] is not None\n\n\ndef test_pylint_widget_pylintrc(pylint_test_script, pylintrc_files, mocker,\n qtbot):\n \"\"\"Test that entire pylint widget gets results depending on pylintrc.\"\"\"\n search_paths, __, bad_names = pylintrc_files\n mocker.patch('pylint.config.os.path.expanduser', return_value=\n search_paths[HOME_DIR])\n mocker.patch('spyder.plugins.pylint.widgets.pylintgui.getcwd_or_home',\n return_value=search_paths[WORKING_DIR])\n mocker.patch('spyder.plugins.pylint.widgets.pylintgui.osp.expanduser',\n return_value=search_paths[HOME_DIR])\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=search_paths[PROJECT_DIR])\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n qtbot.waitUntil(lambda : pylint_widget.get_data(pylint_test_script)[1]\n is not None, timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n conventions = pylint_data[1][3]['C:']\n assert conventions\n assert len(conventions) == len(bad_names)\n assert all([(sum([(bad_name in message[2]) for message in conventions]) ==\n 1) for bad_name in bad_names])\n\n\ndef test_pylint_max_history_conf(pylint_test_scripts, mocker):\n \"\"\"Regression test for checking max_entries configuration.\n\n For further information see spyder-ide/spyder#12884\n \"\"\"\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.filecombo.clear()\n script_0, script_1, script_2 = pylint_test_scripts(['test_script_{}.py'\n .format(n) for n in range(3)])\n pylint_widget.parent.set_option('max_entries', 2)\n pylint_widget.change_history_limit(2)\n assert pylint_widget.parent.get_option('max_entries') == 2\n pylint_widget.set_filename(filename=script_0)\n assert pylint_widget.filecombo.count() == 1\n pylint_widget.set_filename(filename=script_1)\n pylint_widget.set_filename(filename=script_2)\n assert pylint_widget.filecombo.count() == 2\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n assert 'test_script_1.py' in pylint_widget.curr_filenames[1]\n pylint_widget.parent.set_option('max_entries', 1)\n pylint_widget.change_history_limit(1)\n assert pylint_widget.filecombo.count() == 1\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MainWindowMock(QObject):\n sig_editor_focus_changed = Signal(str)\n\n def __init__(self):\n super(MainWindowMock, self).__init__(None)\n self.editor = Mock()\n self.editor.sig_editor_focus_changed = self.sig_editor_focus_changed\n self.projects = MagicMock()\n\n\[email protected]\ndef pylintrc_search_paths(tmp_path_factory):\n \"\"\"Construct temporary .pylintrc search paths.\"\"\"\n search_paths = {dir_name: str(tmp_path_factory.mktemp(dir_name)) for\n dir_name in DIR_LIST}\n return search_paths\n\n\[email protected]\ndef pylint_test_script(pylintrc_search_paths):\n \"\"\"Write a script for testing Pylint to a temporary directory.\"\"\"\n script_path = osp.join(pylintrc_search_paths[SCRIPT_DIR], 'test_script.py')\n with open(script_path, mode='w', encoding='utf-8', newline='\\n'\n ) as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n return script_path\n\n\[email protected]\ndef pylint_test_scripts(pylintrc_search_paths):\n\n def _pylint_test_scripts(filenames):\n \"\"\"Write scripts for testing Pylint to a temporary directory.\"\"\"\n script_paths = []\n for filename in filenames:\n script_path = osp.join(pylintrc_search_paths[SCRIPT_DIR], filename)\n with open(script_path, mode='w', encoding='utf-8', newline='\\n'\n ) as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n script_paths.append(script_path)\n return script_paths\n return _pylint_test_scripts\n\n\[email protected](params=[[], [SCRIPT_DIR], [WORKING_DIR], [PROJECT_DIR], [\n HOME_DIR], [SCRIPT_DIR, HOME_DIR], [WORKING_DIR, PROJECT_DIR], [\n SCRIPT_DIR, PROJECT_DIR], [PROJECT_DIR, HOME_DIR], [SCRIPT_DIR,\n WORKING_DIR, PROJECT_DIR, HOME_DIR]], ids=['None', 'Script', 'Working',\n 'Project', 'Home', 'Script & Home', 'Working & Project',\n 'Script & Working', 'Project & Home', 'All'])\ndef pylintrc_files(pylintrc_search_paths, request):\n \"\"\"Store test .pylintrc files at the paths and determine the result.\"\"\"\n search_paths = pylintrc_search_paths\n pylintrc_locations = request.param\n bad_names = [ALL_DIR]\n for search_path_name, search_path in search_paths.items():\n if search_path_name in pylintrc_locations:\n expected_path = osp.join(search_path, PYLINTRC_FILENAME)\n bad_names += [search_path_name]\n break\n else:\n expected_path = None\n bad_names = [NO_DIR]\n for location in pylintrc_locations:\n pylintrc_test_contents = PYLINTRC_TEST_CONTENTS.format(bad_names=\n ', '.join([location, ALL_DIR]))\n pylintrc_path = osp.join(search_paths[location], PYLINTRC_FILENAME)\n with open(pylintrc_path, mode='w', encoding='utf-8', newline='\\n'\n ) as rc_file:\n rc_file.write(pylintrc_test_contents)\n return search_paths, expected_path, bad_names\n\n\ndef test_get_pylintrc_path(pylintrc_files, mocker):\n \"\"\"Test that get_pylintrc_path finds the expected one in the hiearchy.\"\"\"\n search_paths, expected_path, __ = pylintrc_files\n mocker.patch('pylint.config.os.path.expanduser', return_value=\n search_paths[HOME_DIR])\n actual_path = get_pylintrc_path(search_paths=list(search_paths.values()\n ), home_path=search_paths[HOME_DIR])\n assert actual_path == expected_path\n\n\ndef test_pylint_widget_noproject(pylint_test_script, mocker, qtbot):\n \"\"\"Test that pylint works without errors with no project open.\"\"\"\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n 
qtbot.waitUntil(lambda : pylint_widget.get_data(pylint_test_script)[1]\n is not None, timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n assert pylint_data[0] is not None\n assert pylint_data[1] is not None\n\n\ndef test_pylint_widget_pylintrc(pylint_test_script, pylintrc_files, mocker,\n qtbot):\n \"\"\"Test that entire pylint widget gets results depending on pylintrc.\"\"\"\n search_paths, __, bad_names = pylintrc_files\n mocker.patch('pylint.config.os.path.expanduser', return_value=\n search_paths[HOME_DIR])\n mocker.patch('spyder.plugins.pylint.widgets.pylintgui.getcwd_or_home',\n return_value=search_paths[WORKING_DIR])\n mocker.patch('spyder.plugins.pylint.widgets.pylintgui.osp.expanduser',\n return_value=search_paths[HOME_DIR])\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=search_paths[PROJECT_DIR])\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n qtbot.waitUntil(lambda : pylint_widget.get_data(pylint_test_script)[1]\n is not None, timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n conventions = pylint_data[1][3]['C:']\n assert conventions\n assert len(conventions) == len(bad_names)\n assert all([(sum([(bad_name in message[2]) for message in conventions]) ==\n 1) for bad_name in bad_names])\n\n\ndef test_pylint_max_history_conf(pylint_test_scripts, mocker):\n \"\"\"Regression test for checking max_entries configuration.\n\n For further information see spyder-ide/spyder#12884\n \"\"\"\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.filecombo.clear()\n script_0, script_1, script_2 = pylint_test_scripts(['test_script_{}.py'\n .format(n) for n in range(3)])\n pylint_widget.parent.set_option('max_entries', 2)\n pylint_widget.change_history_limit(2)\n assert pylint_widget.parent.get_option('max_entries') == 2\n pylint_widget.set_filename(filename=script_0)\n assert pylint_widget.filecombo.count() == 1\n pylint_widget.set_filename(filename=script_1)\n pylint_widget.set_filename(filename=script_2)\n assert pylint_widget.filecombo.count() == 2\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n assert 'test_script_1.py' in pylint_widget.curr_filenames[1]\n pylint_widget.parent.set_option('max_entries', 1)\n pylint_widget.change_history_limit(1)\n assert pylint_widget.filecombo.count() == 1\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MainWindowMock(QObject):\n sig_editor_focus_changed = Signal(str)\n\n def __init__(self):\n super(MainWindowMock, self).__init__(None)\n self.editor = Mock()\n self.editor.sig_editor_focus_changed = self.sig_editor_focus_changed\n self.projects = MagicMock()\n\n\[email protected]\ndef pylintrc_search_paths(tmp_path_factory):\n \"\"\"Construct temporary .pylintrc search paths.\"\"\"\n search_paths = {dir_name: str(tmp_path_factory.mktemp(dir_name)) for\n dir_name in DIR_LIST}\n return search_paths\n\n\[email protected]\ndef pylint_test_script(pylintrc_search_paths):\n \"\"\"Write a script for testing Pylint to a temporary directory.\"\"\"\n script_path = osp.join(pylintrc_search_paths[SCRIPT_DIR], 'test_script.py')\n with open(script_path, mode='w', encoding='utf-8', newline='\\n'\n ) as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n return script_path\n\n\[email protected]\ndef pylint_test_scripts(pylintrc_search_paths):\n\n def _pylint_test_scripts(filenames):\n \"\"\"Write scripts for testing Pylint to a temporary directory.\"\"\"\n script_paths = []\n for filename in filenames:\n script_path = osp.join(pylintrc_search_paths[SCRIPT_DIR], filename)\n with open(script_path, mode='w', encoding='utf-8', newline='\\n'\n ) as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n script_paths.append(script_path)\n return script_paths\n return _pylint_test_scripts\n\n\[email protected](params=[[], [SCRIPT_DIR], [WORKING_DIR], [PROJECT_DIR], [\n HOME_DIR], [SCRIPT_DIR, HOME_DIR], [WORKING_DIR, PROJECT_DIR], [\n SCRIPT_DIR, PROJECT_DIR], [PROJECT_DIR, HOME_DIR], [SCRIPT_DIR,\n WORKING_DIR, PROJECT_DIR, HOME_DIR]], ids=['None', 'Script', 'Working',\n 'Project', 'Home', 'Script & Home', 'Working & Project',\n 'Script & Working', 'Project & Home', 'All'])\ndef pylintrc_files(pylintrc_search_paths, request):\n \"\"\"Store test .pylintrc files at the paths and determine the result.\"\"\"\n search_paths = pylintrc_search_paths\n pylintrc_locations = request.param\n bad_names = [ALL_DIR]\n for search_path_name, search_path in search_paths.items():\n if search_path_name in pylintrc_locations:\n expected_path = osp.join(search_path, PYLINTRC_FILENAME)\n bad_names += [search_path_name]\n break\n else:\n expected_path = None\n bad_names = [NO_DIR]\n for location in pylintrc_locations:\n pylintrc_test_contents = PYLINTRC_TEST_CONTENTS.format(bad_names=\n ', '.join([location, ALL_DIR]))\n pylintrc_path = osp.join(search_paths[location], PYLINTRC_FILENAME)\n with open(pylintrc_path, mode='w', encoding='utf-8', newline='\\n'\n ) as rc_file:\n rc_file.write(pylintrc_test_contents)\n return search_paths, expected_path, bad_names\n\n\ndef test_get_pylintrc_path(pylintrc_files, mocker):\n \"\"\"Test that get_pylintrc_path finds the expected one in the hiearchy.\"\"\"\n search_paths, expected_path, __ = pylintrc_files\n mocker.patch('pylint.config.os.path.expanduser', return_value=\n search_paths[HOME_DIR])\n actual_path = get_pylintrc_path(search_paths=list(search_paths.values()\n ), home_path=search_paths[HOME_DIR])\n assert actual_path == expected_path\n\n\ndef test_pylint_widget_noproject(pylint_test_script, mocker, qtbot):\n \"\"\"Test that pylint works without errors with no project open.\"\"\"\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n 
qtbot.waitUntil(lambda : pylint_widget.get_data(pylint_test_script)[1]\n is not None, timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n assert pylint_data[0] is not None\n assert pylint_data[1] is not None\n\n\ndef test_pylint_widget_pylintrc(pylint_test_script, pylintrc_files, mocker,\n qtbot):\n \"\"\"Test that entire pylint widget gets results depending on pylintrc.\"\"\"\n search_paths, __, bad_names = pylintrc_files\n mocker.patch('pylint.config.os.path.expanduser', return_value=\n search_paths[HOME_DIR])\n mocker.patch('spyder.plugins.pylint.widgets.pylintgui.getcwd_or_home',\n return_value=search_paths[WORKING_DIR])\n mocker.patch('spyder.plugins.pylint.widgets.pylintgui.osp.expanduser',\n return_value=search_paths[HOME_DIR])\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=search_paths[PROJECT_DIR])\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n qtbot.waitUntil(lambda : pylint_widget.get_data(pylint_test_script)[1]\n is not None, timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n conventions = pylint_data[1][3]['C:']\n assert conventions\n assert len(conventions) == len(bad_names)\n assert all([(sum([(bad_name in message[2]) for message in conventions]) ==\n 1) for bad_name in bad_names])\n\n\ndef test_pylint_max_history_conf(pylint_test_scripts, mocker):\n \"\"\"Regression test for checking max_entries configuration.\n\n For further information see spyder-ide/spyder#12884\n \"\"\"\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.filecombo.clear()\n script_0, script_1, script_2 = pylint_test_scripts(['test_script_{}.py'\n .format(n) for n in range(3)])\n pylint_widget.parent.set_option('max_entries', 2)\n pylint_widget.change_history_limit(2)\n assert pylint_widget.parent.get_option('max_entries') == 2\n pylint_widget.set_filename(filename=script_0)\n assert pylint_widget.filecombo.count() == 1\n pylint_widget.set_filename(filename=script_1)\n pylint_widget.set_filename(filename=script_2)\n assert pylint_widget.filecombo.count() == 2\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n assert 'test_script_1.py' in pylint_widget.curr_filenames[1]\n pylint_widget.parent.set_option('max_entries', 1)\n pylint_widget.change_history_limit(1)\n assert pylint_widget.filecombo.count() == 1\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n\n\nif __name__ == '__main__':\n pytest.main([osp.basename(__file__), '-vv', '-rw'])\n",
"step-4": "<mask token>\nfrom io import open\nimport os.path as osp\nfrom unittest.mock import Mock, MagicMock\nimport pytest\nfrom qtpy.QtCore import Signal, QObject\nfrom spyder.plugins.pylint.plugin import Pylint\nfrom spyder.plugins.pylint.widgets.pylintgui import PylintWidget\nfrom spyder.plugins.pylint.utils import get_pylintrc_path\nPYLINTRC_FILENAME = '.pylintrc'\nNO_DIR = 'e'\nSCRIPT_DIR = 'SCRIPT_DIR'\nWORKING_DIR = 'WORKING_DIR'\nPROJECT_DIR = 'PROJECT_DIR'\nHOME_DIR = 'HOME_DIR'\nALL_DIR = 'ALL_DIR'\nDIR_LIST = [SCRIPT_DIR, WORKING_DIR, PROJECT_DIR, HOME_DIR]\nDIR_LIST_ALL = [NO_DIR] + DIR_LIST + [ALL_DIR]\nPYLINT_TEST_SCRIPT = \"\"\"import math\nimport os\nimport sys\n\"\"\" + '\\n'.join([(\n dir_name + ' = ' + str(idx)) for idx, dir_name in enumerate(DIR_LIST_ALL)])\nPYLINT_TEST_SCRIPT = '\"\"\"Docstring.\"\"\"\\n' + PYLINT_TEST_SCRIPT + '\\n'\nPYLINTRC_TEST_CONTENTS = \"\"\"\n[MESSAGES CONTROL]\nenable=blacklisted-name\n\n[BASIC]\nbad-names={bad_names}\ngood-names=e\n\"\"\"\n\n\nclass MainWindowMock(QObject):\n sig_editor_focus_changed = Signal(str)\n\n def __init__(self):\n super(MainWindowMock, self).__init__(None)\n self.editor = Mock()\n self.editor.sig_editor_focus_changed = self.sig_editor_focus_changed\n self.projects = MagicMock()\n\n\[email protected]\ndef pylintrc_search_paths(tmp_path_factory):\n \"\"\"Construct temporary .pylintrc search paths.\"\"\"\n search_paths = {dir_name: str(tmp_path_factory.mktemp(dir_name)) for\n dir_name in DIR_LIST}\n return search_paths\n\n\[email protected]\ndef pylint_test_script(pylintrc_search_paths):\n \"\"\"Write a script for testing Pylint to a temporary directory.\"\"\"\n script_path = osp.join(pylintrc_search_paths[SCRIPT_DIR], 'test_script.py')\n with open(script_path, mode='w', encoding='utf-8', newline='\\n'\n ) as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n return script_path\n\n\[email protected]\ndef pylint_test_scripts(pylintrc_search_paths):\n\n def _pylint_test_scripts(filenames):\n \"\"\"Write scripts for testing Pylint to a temporary directory.\"\"\"\n script_paths = []\n for filename in filenames:\n script_path = osp.join(pylintrc_search_paths[SCRIPT_DIR], filename)\n with open(script_path, mode='w', encoding='utf-8', newline='\\n'\n ) as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n script_paths.append(script_path)\n return script_paths\n return _pylint_test_scripts\n\n\[email protected](params=[[], [SCRIPT_DIR], [WORKING_DIR], [PROJECT_DIR], [\n HOME_DIR], [SCRIPT_DIR, HOME_DIR], [WORKING_DIR, PROJECT_DIR], [\n SCRIPT_DIR, PROJECT_DIR], [PROJECT_DIR, HOME_DIR], [SCRIPT_DIR,\n WORKING_DIR, PROJECT_DIR, HOME_DIR]], ids=['None', 'Script', 'Working',\n 'Project', 'Home', 'Script & Home', 'Working & Project',\n 'Script & Working', 'Project & Home', 'All'])\ndef pylintrc_files(pylintrc_search_paths, request):\n \"\"\"Store test .pylintrc files at the paths and determine the result.\"\"\"\n search_paths = pylintrc_search_paths\n pylintrc_locations = request.param\n bad_names = [ALL_DIR]\n for search_path_name, search_path in search_paths.items():\n if search_path_name in pylintrc_locations:\n expected_path = osp.join(search_path, PYLINTRC_FILENAME)\n bad_names += [search_path_name]\n break\n else:\n expected_path = None\n bad_names = [NO_DIR]\n for location in pylintrc_locations:\n pylintrc_test_contents = PYLINTRC_TEST_CONTENTS.format(bad_names=\n ', '.join([location, ALL_DIR]))\n pylintrc_path = osp.join(search_paths[location], PYLINTRC_FILENAME)\n with open(pylintrc_path, mode='w', 
encoding='utf-8', newline='\\n'\n ) as rc_file:\n rc_file.write(pylintrc_test_contents)\n return search_paths, expected_path, bad_names\n\n\ndef test_get_pylintrc_path(pylintrc_files, mocker):\n \"\"\"Test that get_pylintrc_path finds the expected one in the hiearchy.\"\"\"\n search_paths, expected_path, __ = pylintrc_files\n mocker.patch('pylint.config.os.path.expanduser', return_value=\n search_paths[HOME_DIR])\n actual_path = get_pylintrc_path(search_paths=list(search_paths.values()\n ), home_path=search_paths[HOME_DIR])\n assert actual_path == expected_path\n\n\ndef test_pylint_widget_noproject(pylint_test_script, mocker, qtbot):\n \"\"\"Test that pylint works without errors with no project open.\"\"\"\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n qtbot.waitUntil(lambda : pylint_widget.get_data(pylint_test_script)[1]\n is not None, timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n assert pylint_data[0] is not None\n assert pylint_data[1] is not None\n\n\ndef test_pylint_widget_pylintrc(pylint_test_script, pylintrc_files, mocker,\n qtbot):\n \"\"\"Test that entire pylint widget gets results depending on pylintrc.\"\"\"\n search_paths, __, bad_names = pylintrc_files\n mocker.patch('pylint.config.os.path.expanduser', return_value=\n search_paths[HOME_DIR])\n mocker.patch('spyder.plugins.pylint.widgets.pylintgui.getcwd_or_home',\n return_value=search_paths[WORKING_DIR])\n mocker.patch('spyder.plugins.pylint.widgets.pylintgui.osp.expanduser',\n return_value=search_paths[HOME_DIR])\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=search_paths[PROJECT_DIR])\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n qtbot.waitUntil(lambda : pylint_widget.get_data(pylint_test_script)[1]\n is not None, timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n conventions = pylint_data[1][3]['C:']\n assert conventions\n assert len(conventions) == len(bad_names)\n assert all([(sum([(bad_name in message[2]) for message in conventions]) ==\n 1) for bad_name in bad_names])\n\n\ndef test_pylint_max_history_conf(pylint_test_scripts, mocker):\n \"\"\"Regression test for checking max_entries configuration.\n\n For further information see spyder-ide/spyder#12884\n \"\"\"\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.filecombo.clear()\n script_0, script_1, script_2 = pylint_test_scripts(['test_script_{}.py'\n .format(n) for n in range(3)])\n pylint_widget.parent.set_option('max_entries', 2)\n pylint_widget.change_history_limit(2)\n assert pylint_widget.parent.get_option('max_entries') == 2\n pylint_widget.set_filename(filename=script_0)\n assert pylint_widget.filecombo.count() == 1\n pylint_widget.set_filename(filename=script_1)\n pylint_widget.set_filename(filename=script_2)\n assert pylint_widget.filecombo.count() == 2\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n assert 'test_script_1.py' in 
pylint_widget.curr_filenames[1]\n pylint_widget.parent.set_option('max_entries', 1)\n pylint_widget.change_history_limit(1)\n assert pylint_widget.filecombo.count() == 1\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n\n\nif __name__ == '__main__':\n pytest.main([osp.basename(__file__), '-vv', '-rw'])\n",
"step-5": "# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------------\n# Copyright © 2020- Spyder Project Contributors\n#\n# Released under the terms of the MIT License\n# ----------------------------------------------------------------------------\n\n\"\"\"Tests for the execution of pylint.\"\"\"\n\n# Standard library imports\nfrom io import open\nimport os.path as osp\nfrom unittest.mock import Mock, MagicMock\n\n# Third party imports\nimport pytest\nfrom qtpy.QtCore import Signal, QObject\n\n# Local imports\nfrom spyder.plugins.pylint.plugin import Pylint\nfrom spyder.plugins.pylint.widgets.pylintgui import PylintWidget\nfrom spyder.plugins.pylint.utils import get_pylintrc_path\n\n# pylint: disable=redefined-outer-name\n\nPYLINTRC_FILENAME = \".pylintrc\"\n\n# Constants for dir name keys\n# In Python 3 and Spyder 5, replace with enum\nNO_DIR = \"e\"\nSCRIPT_DIR = \"SCRIPT_DIR\"\nWORKING_DIR = \"WORKING_DIR\"\nPROJECT_DIR = \"PROJECT_DIR\"\nHOME_DIR = \"HOME_DIR\"\nALL_DIR = \"ALL_DIR\"\n\nDIR_LIST = [SCRIPT_DIR, WORKING_DIR, PROJECT_DIR, HOME_DIR]\nDIR_LIST_ALL = [NO_DIR] + DIR_LIST + [ALL_DIR]\n\nPYLINT_TEST_SCRIPT = \"import math\\nimport os\\nimport sys\\n\" + \"\\n\".join(\n [dir_name + \" = \" + str(idx) for idx, dir_name in enumerate(DIR_LIST_ALL)])\nPYLINT_TEST_SCRIPT = \"\\\"\\\"\\\"Docstring.\\\"\\\"\\\"\\n\" + PYLINT_TEST_SCRIPT + \"\\n\"\n\nPYLINTRC_TEST_CONTENTS = \"\"\"\n[MESSAGES CONTROL]\nenable=blacklisted-name\n\n[BASIC]\nbad-names={bad_names}\ngood-names=e\n\"\"\"\n\n\nclass MainWindowMock(QObject):\n sig_editor_focus_changed = Signal(str)\n\n def __init__(self):\n super(MainWindowMock, self).__init__(None)\n self.editor = Mock()\n self.editor.sig_editor_focus_changed = self.sig_editor_focus_changed\n self.projects = MagicMock()\n\n\[email protected]\ndef pylintrc_search_paths(tmp_path_factory):\n \"\"\"Construct temporary .pylintrc search paths.\"\"\"\n search_paths = {dir_name: str(tmp_path_factory.mktemp(dir_name))\n for dir_name in DIR_LIST}\n return search_paths\n\n\[email protected]\ndef pylint_test_script(pylintrc_search_paths):\n \"\"\"Write a script for testing Pylint to a temporary directory.\"\"\"\n script_path = osp.join(\n pylintrc_search_paths[SCRIPT_DIR], \"test_script.py\")\n with open(script_path, mode=\"w\",\n encoding=\"utf-8\", newline=\"\\n\") as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n\n return script_path\n\n\[email protected]\ndef pylint_test_scripts(pylintrc_search_paths):\n def _pylint_test_scripts(filenames):\n \"\"\"Write scripts for testing Pylint to a temporary directory.\"\"\"\n script_paths = []\n for filename in filenames:\n script_path = osp.join(\n pylintrc_search_paths[SCRIPT_DIR], filename)\n with open(script_path, mode=\"w\",\n encoding=\"utf-8\", newline=\"\\n\") as script_file:\n script_file.write(PYLINT_TEST_SCRIPT)\n script_paths.append(script_path)\n return script_paths\n return _pylint_test_scripts\n\n\[email protected](\n params=[\n [], [SCRIPT_DIR], [WORKING_DIR], [PROJECT_DIR], [HOME_DIR],\n [SCRIPT_DIR, HOME_DIR], [WORKING_DIR, PROJECT_DIR],\n [SCRIPT_DIR, PROJECT_DIR], [PROJECT_DIR, HOME_DIR],\n [SCRIPT_DIR, WORKING_DIR, PROJECT_DIR, HOME_DIR]],\n ids=[\"None\", \"Script\", \"Working\", \"Project\", \"Home\", \"Script & Home\",\n \"Working & Project\", \"Script & Working\", \"Project & Home\", \"All\"])\ndef pylintrc_files(pylintrc_search_paths, request):\n \"\"\"Store test .pylintrc files at the paths and determine the result.\"\"\"\n search_paths = 
pylintrc_search_paths\n\n # Determine the bad names that should be reported\n pylintrc_locations = request.param\n bad_names = [ALL_DIR]\n for search_path_name, search_path in search_paths.items():\n if search_path_name in pylintrc_locations:\n expected_path = osp.join(search_path, PYLINTRC_FILENAME)\n bad_names += [search_path_name]\n break\n else:\n expected_path = None\n bad_names = [NO_DIR]\n\n # Store the selected pylintrc files at the designated paths\n for location in pylintrc_locations:\n pylintrc_test_contents = PYLINTRC_TEST_CONTENTS.format(\n bad_names=\", \".join([location, ALL_DIR]))\n pylintrc_path = osp.join(search_paths[location], PYLINTRC_FILENAME)\n with open(pylintrc_path, mode=\"w\",\n encoding=\"utf-8\", newline=\"\\n\") as rc_file:\n rc_file.write(pylintrc_test_contents)\n return search_paths, expected_path, bad_names\n\n\ndef test_get_pylintrc_path(pylintrc_files, mocker):\n \"\"\"Test that get_pylintrc_path finds the expected one in the hiearchy.\"\"\"\n search_paths, expected_path, __ = pylintrc_files\n mocker.patch(\"pylint.config.os.path.expanduser\",\n return_value=search_paths[HOME_DIR])\n actual_path = get_pylintrc_path(\n search_paths=list(search_paths.values()),\n home_path=search_paths[HOME_DIR],\n )\n assert actual_path == expected_path\n\n\ndef test_pylint_widget_noproject(pylint_test_script, mocker, qtbot):\n \"\"\"Test that pylint works without errors with no project open.\"\"\"\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n qtbot.waitUntil(\n lambda: pylint_widget.get_data(pylint_test_script)[1] is not None,\n timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n assert pylint_data[0] is not None\n assert pylint_data[1] is not None\n\n\ndef test_pylint_widget_pylintrc(\n pylint_test_script, pylintrc_files, mocker, qtbot):\n \"\"\"Test that entire pylint widget gets results depending on pylintrc.\"\"\"\n search_paths, __, bad_names = pylintrc_files\n mocker.patch(\"pylint.config.os.path.expanduser\",\n return_value=search_paths[HOME_DIR])\n mocker.patch(\"spyder.plugins.pylint.widgets.pylintgui.getcwd_or_home\",\n return_value=search_paths[WORKING_DIR])\n mocker.patch(\"spyder.plugins.pylint.widgets.pylintgui.osp.expanduser\",\n return_value=search_paths[HOME_DIR])\n main_window = MainWindowMock()\n main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=search_paths[PROJECT_DIR])\n pylint_sw = Pylint(parent=main_window)\n\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.analyze(filename=pylint_test_script)\n qtbot.waitUntil(\n lambda: pylint_widget.get_data(pylint_test_script)[1] is not None,\n timeout=5000)\n pylint_data = pylint_widget.get_data(filename=pylint_test_script)\n print(pylint_data)\n assert pylint_data\n conventions = pylint_data[1][3][\"C:\"]\n assert conventions\n assert len(conventions) == len(bad_names)\n assert all([sum([bad_name in message[2] for message in conventions]) == 1\n for bad_name in bad_names])\n\n\ndef test_pylint_max_history_conf(pylint_test_scripts, mocker):\n \"\"\"Regression test for checking max_entries configuration.\n\n For further information see spyder-ide/spyder#12884\n \"\"\"\n # Create the pylint widget for code analysis\n main_window = MainWindowMock()\n 
main_window.projects.get_active_project_path = mocker.MagicMock(\n return_value=None)\n pylint_sw = Pylint(parent=main_window)\n pylint_widget = PylintWidget(parent=pylint_sw)\n pylint_widget.filecombo.clear()\n\n script_0, script_1, script_2 = pylint_test_scripts(\n [\"test_script_{}.py\".format(n) for n in range(3)])\n\n # Change the max_entry to 2\n pylint_widget.parent.set_option('max_entries', 2)\n pylint_widget.change_history_limit(2)\n assert pylint_widget.parent.get_option('max_entries') == 2\n\n # Call to set_filename\n pylint_widget.set_filename(filename=script_0)\n assert pylint_widget.filecombo.count() == 1\n\n # Add to more filenames\n pylint_widget.set_filename(filename=script_1)\n pylint_widget.set_filename(filename=script_2)\n\n assert pylint_widget.filecombo.count() == 2\n\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n assert 'test_script_1.py' in pylint_widget.curr_filenames[1]\n\n # Change the max entry to 1\n pylint_widget.parent.set_option('max_entries', 1)\n pylint_widget.change_history_limit(1)\n\n assert pylint_widget.filecombo.count() == 1\n\n assert 'test_script_2.py' in pylint_widget.curr_filenames[0]\n\n\nif __name__ == \"__main__\":\n pytest.main([osp.basename(__file__), '-vv', '-rw'])\n",
"step-ids": [
10,
11,
12,
14,
15
]
}
|
[
10,
11,
12,
14,
15
] |
from . import preprocess
from . import utils
import random
import pickle
import feather
import time
import datetime
import sys
import os
import numpy as np
import pandas as pd
import json
from ...main import api
from flask import request
from flask_restplus import Resource, fields
import warnings
warnings.simplefilter("ignore")
predict_fields = api.model('Prediction Data', {
})
predict_accounts = api.model('Prediction Data By Employee', {
})
prediction = api.model('Prediction', {'attritionproba': fields.Float(
example=0.345), 'attritiondate': fields.String(example='2020-10-06T00:00:00.000Z')})
predictionByEmployee = api.model('Prediction By Employee', {})
model = api.model(
'Predictions', {'predictions': fields.List(fields.Nested(prediction))})
modelByEmployee = api.model(
'Predictions By Employee', {'predictions': fields.List(fields.Nested(predictionByEmployee))})
parser = api.parser()
parser.add_argument('predictdate', location='args', default=datetime.date.today().strftime("%Y-%m-%d"), help='Predict date', required=True)
@api.route("/predict")
@api.expect(parser)
class Predict(Resource):
@api.expect(predict_fields)
@api.marshal_with(model)
def post(self):
args = parser.parse_args()
return getPrediction(request.get_json(), args['predictdate'])
@api.route("/predict/<string:companyid>/<string:accountid>")
@api.expect(parser)
class PredictEmployeeByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid, accountid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, [int(accountid)], args['predictdate'])
@api.route("/predict/<string:companyid>")
@api.expect(parser)
class PredictByCompany(Resource):
@api.marshal_with(modelByEmployee)
def get(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, None, args['predictdate'])
@api.expect(predict_accounts)
@api.marshal_with(modelByEmployee)
def post(self, companyid):
args = parser.parse_args()
return getPredictionByEmployee(companyid, request.get_json()['accountids'], args['predictdate'])
package_directory = os.path.dirname(os.path.abspath(__file__))
def predict_class(local_model, df):
if os.path.isfile(local_model):
model = pickle.load(open(local_model, 'rb'))
result = pd.Series(model.predict_proba(df)[:, 1])
else:
result = pd.Series(random.sample(
range(1000), df.shape[0])).divide(10000)
return result
def predict_reg(local_model, df):
if os.path.isfile(local_model):
model = pickle.load(open(local_model, 'rb'))
result = pd.Series(model.predict(df)).apply(int).clip(lower=0)
else:
result = pd.Series(random.sample(range(100, 1000), df.shape[0]))
return result
def getPrediction(data, predictdate=np.datetime64('today')):
request_json = data
if request_json and 'instances' in request_json and 'companyid' in request_json and 'columns' in request_json:
sys.stdout = open(utils.log_dir + time.strftime("%Y%m%d-%H%M%S") + '_predict.txt', 'w')
# copy model
companyid = str(request_json['companyid'])
print(datetime.datetime.now(), 'Predict for company', companyid)
local_class_model = utils.model_dir + companyid + '/classification/model.pkl'
local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'
columns = request_json['columns']
df = pd.DataFrame(request_json['instances'], columns=columns)
df_1 = preprocess.preprocessDF(df, utils.model_dir + companyid + '/', predictdate)
df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')
data = {}
result_class = predict_class(local_class_model, df_1)
result_reg = predict_reg(local_reg_model, df_1)
df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')
result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')
data['predictions'] = json.loads(pd.DataFrame({'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))
sys.stdout.close()
return data
else:
return {'attritionproba': 0, 'attritiondate': ''}
def getPredictionByEmployee(companyid, accountid=None, predictdate=np.datetime64('today')):
sys.stdout = open(
utils.log_dir + time.strftime("%Y%m%d-%H%M%S") + '_predict.txt', 'w')
# copy model
print(datetime.datetime.now(), 'Predict for company', companyid)
local_class_model = utils.model_dir + companyid + '/classification/model.pkl'
local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'
if np.datetime64(predictdate) >= np.datetime64('today'):
strtodate = ''
else:
strtodate = np.datetime64(predictdate).astype(datetime.datetime).strftime('%Y%m')
if os.path.isfile(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather'):
df = feather.read_dataframe(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')
else:
df = pd.read_csv(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.csv', low_memory=False)
feather.write_dataframe(df, utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')
if os.path.isfile(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather'):
df_1 = feather.read_dataframe(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')
else:
df_1 = pd.read_csv(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.csv', low_memory=False)
feather.write_dataframe(df_1, utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')
if accountid:
df = df.loc[(df['CompId'] == int(companyid)) & (df['AccountId'].isin(accountid))].reset_index(drop=True)
df_1 = df_1.loc[(df_1['CompId'] == int(companyid)) & (df_1['AccountId'].isin(accountid))].reset_index(drop=True)
else:
df = df.loc[(df['CompId'] == int(companyid))]
df_1 = df_1.loc[(df['CompId'] == int(companyid))]
#df_1 = preprocess.preprocessDF(df, utils.model_dir + companyid + '/', np.datetime64(predictdate))
df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')
print(datetime.datetime.now(), 'Predict for data', df_1.shape)
data = {}
result_class = predict_class(local_class_model, df_1)
result_reg = predict_reg(local_reg_model, df_1)
df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')
result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')
data['predictions'] = json.loads(pd.DataFrame(
{'accountid': df['AccountId'], 'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))
sys.stdout.close()
return data
|
normal
|
{
"blob_id": "c76fd9b196b50e6fcced7e56517c0cd8ab30e24e",
"index": 7891,
"step-1": "<mask token>\n\n\[email protected]('/predict')\[email protected](parser)\nclass Predict(Resource):\n <mask token>\n\n\[email protected]('/predict/<string:companyid>/<string:accountid>')\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args[\n 'predictdate'])\n\n\[email protected]('/predict/<string:companyid>')\[email protected](parser)\nclass PredictByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()[\n 'accountids'], args['predictdate'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/predict')\[email protected](parser)\nclass Predict(Resource):\n\n @api.expect(predict_fields)\n @api.marshal_with(model)\n def post(self):\n args = parser.parse_args()\n return getPrediction(request.get_json(), args['predictdate'])\n\n\[email protected]('/predict/<string:companyid>/<string:accountid>')\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args[\n 'predictdate'])\n\n\[email protected]('/predict/<string:companyid>')\[email protected](parser)\nclass PredictByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()[\n 'accountids'], args['predictdate'])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/predict')\[email protected](parser)\nclass Predict(Resource):\n\n @api.expect(predict_fields)\n @api.marshal_with(model)\n def post(self):\n args = parser.parse_args()\n return getPrediction(request.get_json(), args['predictdate'])\n\n\[email protected]('/predict/<string:companyid>/<string:accountid>')\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args[\n 'predictdate'])\n\n\[email protected]('/predict/<string:companyid>')\[email protected](parser)\nclass PredictByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()[\n 'accountids'], args['predictdate'])\n\n\n<mask token>\n\n\ndef predict_reg(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict(df)).apply(int).clip(lower=0)\n else:\n result = pd.Series(random.sample(range(100, 1000), df.shape[0]))\n return result\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\[email protected]('/predict')\[email protected](parser)\nclass Predict(Resource):\n\n @api.expect(predict_fields)\n @api.marshal_with(model)\n def post(self):\n args = parser.parse_args()\n return getPrediction(request.get_json(), args['predictdate'])\n\n\[email protected]('/predict/<string:companyid>/<string:accountid>')\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args[\n 'predictdate'])\n\n\[email protected]('/predict/<string:companyid>')\[email protected](parser)\nclass PredictByCompany(Resource):\n\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()[\n 'accountids'], args['predictdate'])\n\n\n<mask token>\n\n\ndef predict_class(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict_proba(df)[:, 1])\n else:\n result = pd.Series(random.sample(range(1000), df.shape[0])).divide(\n 10000)\n return result\n\n\ndef predict_reg(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict(df)).apply(int).clip(lower=0)\n else:\n result = pd.Series(random.sample(range(100, 1000), df.shape[0]))\n return result\n\n\n<mask token>\n\n\ndef getPredictionByEmployee(companyid, accountid=None, predictdate=np.\n datetime64('today')):\n sys.stdout = open(utils.log_dir + time.strftime('%Y%m%d-%H%M%S') +\n '_predict.txt', 'w')\n print(datetime.datetime.now(), 'Predict for company', companyid)\n local_class_model = (utils.model_dir + companyid +\n '/classification/model.pkl')\n local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'\n if np.datetime64(predictdate) >= np.datetime64('today'):\n strtodate = ''\n else:\n strtodate = np.datetime64(predictdate).astype(datetime.datetime\n ).strftime('%Y%m')\n if os.path.isfile(utils.data_dir + companyid + '/preparedData_test' +\n strtodate + '.feather'):\n df = feather.read_dataframe(utils.data_dir + companyid +\n '/preparedData_test' + strtodate + '.feather')\n else:\n df = pd.read_csv(utils.data_dir + companyid + '/preparedData_test' +\n strtodate + '.csv', low_memory=False)\n feather.write_dataframe(df, utils.data_dir + companyid +\n '/preparedData_test' + strtodate + '.feather')\n if os.path.isfile(utils.model_dir + companyid +\n '/preprocessedData_test' + strtodate + '.feather'):\n df_1 = feather.read_dataframe(utils.model_dir + companyid +\n '/preprocessedData_test' + strtodate + '.feather')\n else:\n df_1 = pd.read_csv(utils.model_dir + companyid +\n '/preprocessedData_test' + strtodate + '.csv', low_memory=False)\n feather.write_dataframe(df_1, utils.model_dir + companyid +\n '/preprocessedData_test' + strtodate + '.feather')\n if accountid:\n df = df.loc[(df['CompId'] == int(companyid)) & df['AccountId'].isin\n (accountid)].reset_index(drop=True)\n df_1 = df_1.loc[(df_1['CompId'] == int(companyid)) & df_1[\n 'AccountId'].isin(accountid)].reset_index(drop=True)\n else:\n df = df.loc[df['CompId'] == int(companyid)]\n df_1 = df_1.loc[df['CompId'] == int(companyid)]\n df_1 = 
df_1.drop(['CompId', 'AccountId', 'AttritionReasonId',\n 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')\n print(datetime.datetime.now(), 'Predict for data', df_1.shape)\n data = {}\n result_class = predict_class(local_class_model, df_1)\n result_reg = predict_reg(local_reg_model, df_1)\n df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')\n result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')\n data['predictions'] = json.loads(pd.DataFrame({'accountid': df[\n 'AccountId'], 'attritionproba': result_class, 'attritiondate':\n result_date}).to_json(orient='records', date_format='iso'))\n sys.stdout.close()\n return data\n",
"step-5": "from . import preprocess\nfrom . import utils\nimport random\nimport pickle\nimport feather\nimport time\nimport datetime\nimport sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport json\nfrom ...main import api\nfrom flask import request\nfrom flask_restplus import Resource, fields\n\nimport warnings\nwarnings.simplefilter(\"ignore\")\n\n\npredict_fields = api.model('Prediction Data', {\n})\n\npredict_accounts = api.model('Prediction Data By Employee', {\n \n})\n\nprediction = api.model('Prediction', {'attritionproba': fields.Float(\n example=0.345), 'attritiondate': fields.String(example='2020-10-06T00:00:00.000Z')})\n\npredictionByEmployee = api.model('Prediction By Employee', {})\n\nmodel = api.model(\n 'Predictions', {'predictions': fields.List(fields.Nested(prediction))})\n\nmodelByEmployee = api.model(\n 'Predictions By Employee', {'predictions': fields.List(fields.Nested(predictionByEmployee))})\n\nparser = api.parser()\nparser.add_argument('predictdate', location='args', default=datetime.date.today().strftime(\"%Y-%m-%d\"), help='Predict date', required=True)\n\n\[email protected](\"/predict\")\[email protected](parser)\nclass Predict(Resource):\n @api.expect(predict_fields)\n @api.marshal_with(model)\n def post(self):\n args = parser.parse_args()\n return getPrediction(request.get_json(), args['predictdate'])\n\n\[email protected](\"/predict/<string:companyid>/<string:accountid>\")\[email protected](parser)\nclass PredictEmployeeByCompany(Resource):\n @api.marshal_with(modelByEmployee)\n def get(self, companyid, accountid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, [int(accountid)], args['predictdate'])\n\n\[email protected](\"/predict/<string:companyid>\")\[email protected](parser)\nclass PredictByCompany(Resource):\n @api.marshal_with(modelByEmployee)\n def get(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, None, args['predictdate'])\n\n @api.expect(predict_accounts)\n @api.marshal_with(modelByEmployee)\n def post(self, companyid):\n args = parser.parse_args()\n return getPredictionByEmployee(companyid, request.get_json()['accountids'], args['predictdate'])\n\n\npackage_directory = os.path.dirname(os.path.abspath(__file__))\n\n\ndef predict_class(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict_proba(df)[:, 1])\n else:\n result = pd.Series(random.sample(\n range(1000), df.shape[0])).divide(10000)\n\n return result\n\n\ndef predict_reg(local_model, df):\n if os.path.isfile(local_model):\n model = pickle.load(open(local_model, 'rb'))\n result = pd.Series(model.predict(df)).apply(int).clip(lower=0)\n else:\n result = pd.Series(random.sample(range(100, 1000), df.shape[0]))\n return result\n\n\ndef getPrediction(data, predictdate=np.datetime64('today')):\n\n request_json = data\n\n if request_json and 'instances' in request_json and 'companyid' in request_json and 'columns' in request_json:\n sys.stdout = open(utils.log_dir + time.strftime(\"%Y%m%d-%H%M%S\") + '_predict.txt', 'w')\n # copy model\n companyid = str(request_json['companyid'])\n print(datetime.datetime.now(), 'Predict for company', companyid)\n local_class_model = utils.model_dir + companyid + '/classification/model.pkl'\n local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'\n columns = request_json['columns']\n df = pd.DataFrame(request_json['instances'], columns=columns)\n df_1 = preprocess.preprocessDF(df, 
utils.model_dir + companyid + '/', predictdate)\n df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')\n data = {}\n result_class = predict_class(local_class_model, df_1)\n\n result_reg = predict_reg(local_reg_model, df_1)\n\n df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')\n result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')\n\n data['predictions'] = json.loads(pd.DataFrame({'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))\n sys.stdout.close()\n return data\n else:\n return {'attritionproba': 0, 'attritiondate': ''}\n\n\ndef getPredictionByEmployee(companyid, accountid=None, predictdate=np.datetime64('today')):\n sys.stdout = open(\n utils.log_dir + time.strftime(\"%Y%m%d-%H%M%S\") + '_predict.txt', 'w')\n # copy model\n\n print(datetime.datetime.now(), 'Predict for company', companyid)\n local_class_model = utils.model_dir + companyid + '/classification/model.pkl'\n local_reg_model = utils.model_dir + companyid + '/regression/model.pkl'\n\n if np.datetime64(predictdate) >= np.datetime64('today'):\n strtodate = ''\n else:\n strtodate = np.datetime64(predictdate).astype(datetime.datetime).strftime('%Y%m')\n \n if os.path.isfile(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather'):\n df = feather.read_dataframe(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')\n else:\n df = pd.read_csv(utils.data_dir + companyid + '/preparedData_test' + strtodate + '.csv', low_memory=False)\n feather.write_dataframe(df, utils.data_dir + companyid + '/preparedData_test' + strtodate + '.feather')\n \n if os.path.isfile(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather'):\n df_1 = feather.read_dataframe(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')\n else:\n df_1 = pd.read_csv(utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.csv', low_memory=False)\n feather.write_dataframe(df_1, utils.model_dir + companyid + '/preprocessedData_test' + strtodate + '.feather')\n\n if accountid:\n df = df.loc[(df['CompId'] == int(companyid)) & (df['AccountId'].isin(accountid))].reset_index(drop=True)\n df_1 = df_1.loc[(df_1['CompId'] == int(companyid)) & (df_1['AccountId'].isin(accountid))].reset_index(drop=True)\n else:\n df = df.loc[(df['CompId'] == int(companyid))]\n df_1 = df_1.loc[(df['CompId'] == int(companyid))]\n\n #df_1 = preprocess.preprocessDF(df, utils.model_dir + companyid + '/', np.datetime64(predictdate))\n\n df_1 = df_1.drop(['CompId', 'AccountId', 'AttritionReasonId', 'AttritionDays', 'IsAttrition', 'ReasonId'], axis=1, errors='ignore')\n print(datetime.datetime.now(), 'Predict for data', df_1.shape)\n\n data = {}\n result_class = predict_class(local_class_model, df_1)\n\n result_reg = predict_reg(local_reg_model, df_1)\n\n df['HiredOrReHired'] = df['HiredOrReHired'].astype('datetime64[D]')\n result_date = df['HiredOrReHired'] + pd.to_timedelta(result_reg, 'D')\n\n data['predictions'] = json.loads(pd.DataFrame(\n {'accountid': df['AccountId'], 'attritionproba': result_class, 'attritiondate': result_date}).to_json(orient='records', date_format='iso'))\n sys.stdout.close()\n return data\n",
"step-ids": [
6,
7,
8,
10,
15
]
}
|
[
6,
7,
8,
10,
15
] |
import json
import pika
import urllib.request
def validate_urls():
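    # Connect to the local RabbitMQ broker and consume messages from urlValidationQueue indefinitely.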
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='urlValidationQueue')
channel.basic_consume(validate_url,
queue='urlValidationQueue',
no_ack=True)
channel.start_consuming()
def validate_url(ch, method, properties, body):
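    # Per-message callback: check that the GitHub path in the message resolves, then report the result back.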
message = json.loads(body)
valid = True
print(f'Got new URL to check: {message["url"]}.')
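    # An HTTPError from GitHub with a non-200 status marks the link as inaccessible.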
try:
urllib.request.urlopen('https://github.com/' + message["url"])
except urllib.error.HTTPError as e:
if e.code != 200:
valid = False
print(f'Checking done. Link accessible: {valid}.')
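    # POST the result to the post service so it can update the link_accessible flag.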
request = urllib.request.Request('http://localhost:5002/post/' + str(message["id"]) + '/update',
json.dumps({'link_accessible': valid}).encode('utf8'), method='POST',
headers={'content-type': 'application/json'})
urllib.request.urlopen(request)
print(f'Post status updated.')
if __name__ == '__main__':
print("Validator worker started. Waiting for tasks to do...")
validate_urls()
|
normal
|
{
"blob_id": "4a09096abf073294afcf21b1eff9350329d4db33",
"index": 5252,
"step-1": "<mask token>\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')\n )\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True\n )\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')\n )\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True\n )\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\nif __name__ == '__main__':\n print('Validator worker started. Waiting for tasks to do...')\n validate_urls()\n",
"step-4": "import json\nimport pika\nimport urllib.request\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost')\n )\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url, queue='urlValidationQueue', no_ack=True\n )\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f\"Got new URL to check: {message['url']}.\")\n try:\n urllib.request.urlopen('https://github.com/' + message['url'])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(\n message['id']) + '/update', json.dumps({'link_accessible': valid}).\n encode('utf8'), method='POST', headers={'content-type':\n 'application/json'})\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\nif __name__ == '__main__':\n print('Validator worker started. Waiting for tasks to do...')\n validate_urls()\n",
"step-5": "import json\nimport pika\nimport urllib.request\n\n\ndef validate_urls():\n connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n channel = connection.channel()\n channel.queue_declare(queue='urlValidationQueue')\n channel.basic_consume(validate_url,\n queue='urlValidationQueue',\n no_ack=True)\n channel.start_consuming()\n\n\ndef validate_url(ch, method, properties, body):\n message = json.loads(body)\n valid = True\n print(f'Got new URL to check: {message[\"url\"]}.')\n\n try:\n urllib.request.urlopen('https://github.com/' + message[\"url\"])\n except urllib.error.HTTPError as e:\n if e.code != 200:\n valid = False\n\n print(f'Checking done. Link accessible: {valid}.')\n request = urllib.request.Request('http://localhost:5002/post/' + str(message[\"id\"]) + '/update',\n json.dumps({'link_accessible': valid}).encode('utf8'), method='POST',\n headers={'content-type': 'application/json'})\n\n urllib.request.urlopen(request)\n print(f'Post status updated.')\n\n\nif __name__ == '__main__':\n print(\"Validator worker started. Waiting for tasks to do...\")\n validate_urls()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
age=int(input('请输入您的年龄:'))
subject=input('请输入您的专业:')
college=input('请输入您是否毕业于重点大学:(是/不是)')
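# Admit when: EE major older than 25, or EE major from a key university, or CS major younger than 28.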
if (subject=='电子信息工程' and age>25) or (subject=='电子信息工程' and college=='是') or (age<28 and subject=='计算机'):
print('恭喜您被录取!')
else:
print('抱歉,您未达到面试要求')
|
normal
|
{
"blob_id": "4282303e3e6ee122f1379bea73c619870f983f61",
"index": 8580,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif subject == '电子信息工程' and age > 25 or subject == '电子信息工程' and college == '是' or age < 28 and subject == '计算机':\n print('恭喜您被录取!')\nelse:\n print('抱歉,您未达到面试要求')\n",
"step-3": "age = int(input('请输入您的年龄:'))\nsubject = input('请输入您的专业:')\ncollege = input('请输入您是否毕业于重点大学:(是/不是)')\nif subject == '电子信息工程' and age > 25 or subject == '电子信息工程' and college == '是' or age < 28 and subject == '计算机':\n print('恭喜您被录取!')\nelse:\n print('抱歉,您未达到面试要求')\n",
"step-4": "age=int(input('请输入您的年龄:'))\nsubject=input('请输入您的专业:')\ncollege=input('请输入您是否毕业于重点大学:(是/不是)')\nif (subject=='电子信息工程' and age>25) or (subject=='电子信息工程' and college=='是') or (age<28 and subject=='计算机'):\n print('恭喜您被录取!')\nelse:\n print('抱歉,您未达到面试要求')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
import contextlib
import os
import re
import unittest
import webtest
from core.domain import config_domain
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
current_user_services = models.Registry.import_current_user_services()
import feconf
import main
import json
CSRF_REGEX = (
r'csrf_token: JSON\.parse\(\'\\\"([A-Za-z0-9/=_-]+)\\\"\'\)')
# Prefix to append to all lines printed by tests to the console.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
def empty_environ():
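    # Reset the App Engine request environment variables to logged-out defaults for tests.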
os.environ['AUTH_DOMAIN'] = 'example.com'
os.environ['SERVER_NAME'] = 'localhost'
os.environ['HTTP_HOST'] = 'localhost'
os.environ['SERVER_PORT'] = '8080'
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
os.environ['HTTP_HOST'], os.environ['SERVER_PORT'])
class TestBase(unittest.TestCase):
"""Base class for all tests."""
maxDiff = 2500
DEFAULT_USERNAME = 'defaultusername'
def setUp(self):
raise NotImplementedError
def tearDown(self):
raise NotImplementedError
def log_line(self, line):
"""Print the line with a prefix that can be identified by the
script that calls the test.
"""
print '%s%s' % (LOG_LINE_PREFIX, line)
def _delete_all_models(self):
raise NotImplementedError
def login(self, email, is_super_admin=False):
os.environ['USER_EMAIL'] = email
os.environ['USER_ID'] = self.get_user_id_from_email(email)
os.environ['USER_IS_ADMIN'] = '1' if is_super_admin else '0'
def logout(self):
os.environ['USER_EMAIL'] = ''
os.environ['USER_ID'] = ''
os.environ['USER_IS_ADMIN'] = '0'
def shortDescription(self):
"""Additional information logged during unit test invocation."""
# Suppress default logging of docstrings.
return None
def get_expected_login_url(self, slug):
"""Returns the expected login URL."""
return current_user_services.create_login_url(slug)
def get_expected_logout_url(self, slug):
"""Returns the expected logout URL."""
return current_user_services.create_logout_url(slug)
def _parse_json_response(self, json_response, expect_errors=False):
"""Convert a JSON server response to an object (such as a dict)."""
if not expect_errors:
self.assertEqual(json_response.status_int, 200)
self.assertEqual(
json_response.content_type, 'application/javascript')
self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
def get_json(self, url):
"""Get a JSON response, transformed to a Python object."""
json_response = self.testapp.get(url)
self.assertEqual(json_response.status_int, 200)
return self._parse_json_response(json_response, expect_errors=False)
def post_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200, upload_files=None):
"""Post an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.post(
str(url), data, expect_errors=expect_errors,
upload_files=upload_files)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def put_json(self, url, payload, csrf_token=None, expect_errors=False,
expected_status_int=200):
"""Put an object to the server by JSON; return the received object."""
data = {'payload': json.dumps(payload)}
if csrf_token:
data['csrf_token'] = csrf_token
json_response = self.testapp.put(
str(url), data, expect_errors=expect_errors)
self.assertEqual(json_response.status_int, expected_status_int)
return self._parse_json_response(
json_response, expect_errors=expect_errors)
def get_csrf_token_from_response(self, response):
"""Retrieve the CSRF token from a GET response."""
return re.search(CSRF_REGEX, response.body).group(1)
def register_editor(self, email, username=None):
"""Register a user with the given username as an editor."""
if username is None:
username = self.DEFAULT_USERNAME
self.login(email)
response = self.testapp.get(feconf.EDITOR_PREREQUISITES_URL)
csrf_token = self.get_csrf_token_from_response(response)
response = self.testapp.post(feconf.EDITOR_PREREQUISITES_DATA_URL, {
'csrf_token': csrf_token,
'payload': json.dumps({
'username': username,
'agreed_to_terms': True
})
})
self.assertEqual(response.status_int, 200)
self.logout()
def set_admins(self, admin_emails):
"""Set the ADMIN_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.ADMIN_EMAILS.name: admin_emails,
}
}, csrf_token)
self.logout()
def set_moderators(self, moderator_emails):
"""Set the MODERATOR_EMAILS property."""
self.login('[email protected]', is_super_admin=True)
response = self.testapp.get('/admin')
csrf_token = self.get_csrf_token_from_response(response)
self.post_json('/adminhandler', {
'action': 'save_config_properties',
'new_config_property_values': {
config_domain.MODERATOR_EMAILS.name: moderator_emails,
}
}, csrf_token)
self.logout()
def get_current_logged_in_user_id(self):
return os.environ['USER_ID']
def get_user_id_from_email(self, email):
return current_user_services.get_user_id_from_email(email)
def save_new_default_exploration(self,
exploration_id, owner_id, title='A title'):
"""Saves a new default exploration written by owner_id.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exp_services.save_new_exploration(owner_id, exploration)
return exploration
def save_new_valid_exploration(
self, exploration_id, owner_id, title='A title'):
"""Saves a new strictly-validated exploration.
Returns the exploration domain object.
"""
exploration = exp_domain.Exploration.create_default_exploration(
exploration_id, title, 'A category')
exploration.states[exploration.init_state_name].widget.handlers[
0].rule_specs[0].dest = feconf.END_DEST
exploration.objective = 'An objective'
exp_services.save_new_exploration(owner_id, exploration)
return exploration
@contextlib.contextmanager
def swap(self, obj, attr, newvalue):
"""Swap an object's attribute value within the context of a
'with' statement. The object can be anything that supports
getattr and setattr, such as class instances, modules, ...
Example usage:
import math
with self.swap(math, "sqrt", lambda x: 42):
print math.sqrt(16.0) # prints 42
print math.sqrt(16.0) # prints 4 as expected.
"""
original = getattr(obj, attr)
setattr(obj, attr, newvalue)
try:
yield
finally:
setattr(obj, attr, original)
class AppEngineTestBase(TestBase):
"""Base class for tests requiring App Engine services."""
def _delete_all_models(self):
from google.appengine.ext import ndb
ndb.delete_multi(ndb.Query().iter(keys_only=True))
def setUp(self):
empty_environ()
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed
self.testbed = testbed.Testbed()
self.testbed.activate()
# Configure datastore policy to emulate instantaneously and globally
# consistent HRD.
policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
probability=1)
# Declare any relevant App Engine service stubs here.
self.testbed.init_user_stub()
self.testbed.init_memcache_stub()
self.testbed.init_datastore_v3_stub(consistency_policy=policy)
self.testbed.init_taskqueue_stub()
self.taskqueue_stub = self.testbed.get_stub(
testbed.TASKQUEUE_SERVICE_NAME)
self.testbed.init_urlfetch_stub()
self.testbed.init_files_stub()
self.testbed.init_blobstore_stub()
# Set up the app to be tested.
self.testapp = webtest.TestApp(main.app)
def tearDown(self):
self.logout()
self._delete_all_models()
self.testbed.deactivate()
def count_jobs_in_taskqueue(self):
return len(self.taskqueue_stub.get_filtered_tasks())
def process_and_flush_pending_tasks(self):
from google.appengine.ext import deferred
tasks = self.taskqueue_stub.get_filtered_tasks()
self.taskqueue_stub.FlushQueue('default')
while tasks:
for task in tasks:
if task.url == '/_ah/queue/deferred':
deferred.run(task.payload)
else:
# All other tasks are expected to be mapreduce ones.
headers = {
key: str(val) for key, val in task.headers.iteritems()
}
headers['Content-Length'] = str(len(task.payload or ''))
response = self.testapp.post(
url=str(task.url), params=(task.payload or ''),
headers=headers)
if response.status_code != 200:
raise RuntimeError(
'MapReduce task to URL %s failed' % task.url)
tasks = self.taskqueue_stub.get_filtered_tasks()
self.taskqueue_stub.FlushQueue('default')
if feconf.PLATFORM == 'gae':
GenericTestBase = AppEngineTestBase
else:
raise Exception('Invalid platform: expected one of [\'gae\']')
|
normal
|
{
"blob_id": "8a848eece6a3ed07889ba208068de4bfa0ad0bbf",
"index": 6744,
"step-1": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Common utilities for test classes.\"\"\"\n\nimport contextlib\nimport os\nimport re\nimport unittest\nimport webtest\n\nfrom core.domain import config_domain\nfrom core.domain import exp_domain\nfrom core.domain import exp_services\nfrom core.platform import models\ncurrent_user_services = models.Registry.import_current_user_services()\nimport feconf\nimport main\n\nimport json\n\n\nCSRF_REGEX = (\n r'csrf_token: JSON\\.parse\\(\\'\\\\\\\"([A-Za-z0-9/=_-]+)\\\\\\\"\\'\\)')\n# Prefix to append to all lines printed by tests to the console.\nLOG_LINE_PREFIX = 'LOG_INFO_TEST: '\n\n\ndef empty_environ():\n os.environ['AUTH_DOMAIN'] = 'example.com'\n os.environ['SERVER_NAME'] = 'localhost'\n os.environ['HTTP_HOST'] = 'localhost'\n os.environ['SERVER_PORT'] = '8080'\n os.environ['USER_EMAIL'] = ''\n os.environ['USER_ID'] = ''\n os.environ['USER_IS_ADMIN'] = '0'\n os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (\n os.environ['HTTP_HOST'], os.environ['SERVER_PORT'])\n\n\nclass TestBase(unittest.TestCase):\n \"\"\"Base class for all tests.\"\"\"\n\n maxDiff = 2500\n\n DEFAULT_USERNAME = 'defaultusername'\n\n def setUp(self):\n raise NotImplementedError\n\n def tearDown(self):\n raise NotImplementedError\n\n def log_line(self, line):\n \"\"\"Print the line with a prefix that can be identified by the\n script that calls the test.\n \"\"\"\n print '%s%s' % (LOG_LINE_PREFIX, line)\n\n def _delete_all_models(self):\n raise NotImplementedError\n\n def login(self, email, is_super_admin=False):\n os.environ['USER_EMAIL'] = email\n os.environ['USER_ID'] = self.get_user_id_from_email(email)\n os.environ['USER_IS_ADMIN'] = '1' if is_super_admin else '0'\n\n def logout(self):\n os.environ['USER_EMAIL'] = ''\n os.environ['USER_ID'] = ''\n os.environ['USER_IS_ADMIN'] = '0'\n\n def shortDescription(self):\n \"\"\"Additional information logged during unit test invocation.\"\"\"\n # Suppress default logging of docstrings.\n return None\n\n def get_expected_login_url(self, slug):\n \"\"\"Returns the expected login URL.\"\"\"\n return current_user_services.create_login_url(slug)\n\n def get_expected_logout_url(self, slug):\n \"\"\"Returns the expected logout URL.\"\"\"\n return current_user_services.create_logout_url(slug)\n\n def _parse_json_response(self, json_response, expect_errors=False):\n \"\"\"Convert a JSON server response to an object (such as a dict).\"\"\"\n if not expect_errors:\n self.assertEqual(json_response.status_int, 200)\n\n self.assertEqual(\n json_response.content_type, 'application/javascript')\n self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))\n\n return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])\n\n def get_json(self, url):\n \"\"\"Get a JSON response, transformed to a Python object.\"\"\"\n json_response = self.testapp.get(url)\n self.assertEqual(json_response.status_int, 200)\n return self._parse_json_response(json_response, 
expect_errors=False)\n\n def post_json(self, url, payload, csrf_token=None, expect_errors=False,\n expected_status_int=200, upload_files=None):\n \"\"\"Post an object to the server by JSON; return the received object.\"\"\"\n data = {'payload': json.dumps(payload)}\n if csrf_token:\n data['csrf_token'] = csrf_token\n\n json_response = self.testapp.post(\n str(url), data, expect_errors=expect_errors,\n upload_files=upload_files)\n\n self.assertEqual(json_response.status_int, expected_status_int)\n return self._parse_json_response(\n json_response, expect_errors=expect_errors)\n\n def put_json(self, url, payload, csrf_token=None, expect_errors=False,\n expected_status_int=200):\n \"\"\"Put an object to the server by JSON; return the received object.\"\"\"\n data = {'payload': json.dumps(payload)}\n if csrf_token:\n data['csrf_token'] = csrf_token\n\n json_response = self.testapp.put(\n str(url), data, expect_errors=expect_errors)\n\n self.assertEqual(json_response.status_int, expected_status_int)\n return self._parse_json_response(\n json_response, expect_errors=expect_errors)\n\n def get_csrf_token_from_response(self, response):\n \"\"\"Retrieve the CSRF token from a GET response.\"\"\"\n return re.search(CSRF_REGEX, response.body).group(1)\n\n def register_editor(self, email, username=None):\n \"\"\"Register a user with the given username as an editor.\"\"\"\n if username is None:\n username = self.DEFAULT_USERNAME\n\n self.login(email)\n\n response = self.testapp.get(feconf.EDITOR_PREREQUISITES_URL)\n csrf_token = self.get_csrf_token_from_response(response)\n\n response = self.testapp.post(feconf.EDITOR_PREREQUISITES_DATA_URL, {\n 'csrf_token': csrf_token,\n 'payload': json.dumps({\n 'username': username,\n 'agreed_to_terms': True\n })\n })\n self.assertEqual(response.status_int, 200)\n\n self.logout()\n\n def set_admins(self, admin_emails):\n \"\"\"Set the ADMIN_EMAILS property.\"\"\"\n self.login('[email protected]', is_super_admin=True)\n response = self.testapp.get('/admin')\n csrf_token = self.get_csrf_token_from_response(response)\n self.post_json('/adminhandler', {\n 'action': 'save_config_properties',\n 'new_config_property_values': {\n config_domain.ADMIN_EMAILS.name: admin_emails,\n }\n }, csrf_token)\n self.logout()\n\n def set_moderators(self, moderator_emails):\n \"\"\"Set the MODERATOR_EMAILS property.\"\"\"\n self.login('[email protected]', is_super_admin=True)\n response = self.testapp.get('/admin')\n csrf_token = self.get_csrf_token_from_response(response)\n self.post_json('/adminhandler', {\n 'action': 'save_config_properties',\n 'new_config_property_values': {\n config_domain.MODERATOR_EMAILS.name: moderator_emails,\n }\n }, csrf_token)\n self.logout()\n\n def get_current_logged_in_user_id(self):\n return os.environ['USER_ID']\n\n def get_user_id_from_email(self, email):\n return current_user_services.get_user_id_from_email(email)\n\n def save_new_default_exploration(self,\n exploration_id, owner_id, title='A title'):\n \"\"\"Saves a new default exploration written by owner_id.\n\n Returns the exploration domain object.\n \"\"\"\n exploration = exp_domain.Exploration.create_default_exploration(\n exploration_id, title, 'A category')\n exp_services.save_new_exploration(owner_id, exploration)\n return exploration\n\n def save_new_valid_exploration(\n self, exploration_id, owner_id, title='A title'):\n \"\"\"Saves a new strictly-validated exploration.\n\n Returns the exploration domain object.\n \"\"\"\n exploration = exp_domain.Exploration.create_default_exploration(\n 
exploration_id, title, 'A category')\n exploration.states[exploration.init_state_name].widget.handlers[\n 0].rule_specs[0].dest = feconf.END_DEST\n exploration.objective = 'An objective'\n exp_services.save_new_exploration(owner_id, exploration)\n return exploration\n\n @contextlib.contextmanager\n def swap(self, obj, attr, newvalue):\n \"\"\"Swap an object's attribute value within the context of a\n 'with' statement. The object can be anything that supports\n getattr and setattr, such as class instances, modules, ...\n\n Example usage:\n\n import math\n with self.swap(math, \"sqrt\", lambda x: 42):\n print math.sqrt(16.0) # prints 42\n print math.sqrt(16.0) # prints 4 as expected.\n \"\"\"\n original = getattr(obj, attr)\n setattr(obj, attr, newvalue)\n try:\n yield\n finally:\n setattr(obj, attr, original)\n\n\nclass AppEngineTestBase(TestBase):\n \"\"\"Base class for tests requiring App Engine services.\"\"\"\n\n def _delete_all_models(self):\n from google.appengine.ext import ndb\n ndb.delete_multi(ndb.Query().iter(keys_only=True))\n\n def setUp(self):\n empty_environ()\n\n from google.appengine.datastore import datastore_stub_util\n from google.appengine.ext import testbed\n\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n\n # Configure datastore policy to emulate instantaneously and globally\n # consistent HRD.\n policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(\n probability=1)\n\n # Declare any relevant App Engine service stubs here.\n self.testbed.init_user_stub()\n self.testbed.init_memcache_stub()\n self.testbed.init_datastore_v3_stub(consistency_policy=policy)\n self.testbed.init_taskqueue_stub()\n self.taskqueue_stub = self.testbed.get_stub(\n testbed.TASKQUEUE_SERVICE_NAME)\n self.testbed.init_urlfetch_stub()\n self.testbed.init_files_stub()\n self.testbed.init_blobstore_stub()\n\n # Set up the app to be tested.\n self.testapp = webtest.TestApp(main.app)\n\n def tearDown(self):\n self.logout()\n self._delete_all_models()\n self.testbed.deactivate()\n\n def count_jobs_in_taskqueue(self):\n return len(self.taskqueue_stub.get_filtered_tasks())\n\n def process_and_flush_pending_tasks(self):\n from google.appengine.ext import deferred\n\n tasks = self.taskqueue_stub.get_filtered_tasks()\n self.taskqueue_stub.FlushQueue('default')\n while tasks:\n for task in tasks:\n if task.url == '/_ah/queue/deferred':\n deferred.run(task.payload)\n else:\n # All other tasks are expected to be mapreduce ones.\n headers = {\n key: str(val) for key, val in task.headers.iteritems()\n }\n headers['Content-Length'] = str(len(task.payload or ''))\n response = self.testapp.post(\n url=str(task.url), params=(task.payload or ''),\n headers=headers)\n if response.status_code != 200:\n raise RuntimeError(\n 'MapReduce task to URL %s failed' % task.url)\n\n tasks = self.taskqueue_stub.get_filtered_tasks()\n self.taskqueue_stub.FlushQueue('default')\n\n\nif feconf.PLATFORM == 'gae':\n GenericTestBase = AppEngineTestBase\nelse:\n raise Exception('Invalid platform: expected one of [\\'gae\\']')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from PIL import Image, ImageDraw
import torch
from torchvision import transforms
import cfg
from label import point_inside_of_quad
from model_VGG import advancedEAST
from preprocess import resize_image
from nms import nms
def sigmoid(x):
"""`y = 1 / (1 + exp(-x))`"""
return 1 / (1 + np.exp(-x))
def cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_path, s):
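    # Crop the predicted quad from the image, white out pixels outside the quad, and save the crop.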
geo /= [scale_ratio_w, scale_ratio_h]
p_min = np.amin(geo, axis=0)
p_max = np.amax(geo, axis=0)
min_xy = p_min.astype(int)
max_xy = p_max.astype(int) + 2
sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()
for m in range(min_xy[1], max_xy[1]):
for n in range(min_xy[0], max_xy[0]):
if not point_inside_of_quad(n, m, geo, p_min, p_max):
sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255
sub_im = Image.fromarray(sub_im_arr.astype('uint8')).convert('RGB')
sub_im.save(img_path + '_subim%d.jpg' % s)
def predict(east_detect, img_path, pixel_threshold, quiet=False):
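    # Detect text quads in one image, draw the activation map and NMS results, and optionally dump coordinates to a txt file.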
    img = Image.open(img_path)  # PIL image object, RGB by default
d_wight, d_height = resize_image(img, cfg.max_predict_img_size)
img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
x = transforms.ToTensor()(img)
    x = torch.unsqueeze(x, 0)  # add a batch dimension
y = east_detect(x)
    y = torch.squeeze(y, 0)  # remove the batch dimension
print(y.shape)
y = y.detach().numpy() # 7*64*64
if y.shape[0] == 7:
y = y.transpose((1, 2, 0)) # CHW->HWC
y[:, :, :3] = sigmoid(y[:, :, :3])
cond = np.greater_equal(y[:, :, 0], pixel_threshold)
activation_pixels = np.where(cond)
quad_scores, quad_after_nms = nms(y, activation_pixels)
with Image.open(img_path) as im:
        im_array = np.array(im.convert('RGB'))  # convert the image to a numpy array
d_wight, d_height = resize_image(im, cfg.max_predict_img_size)
scale_ratio_w = d_wight / im.width
scale_ratio_h = d_height / im.height
im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')
quad_im = im.copy()
draw = ImageDraw.Draw(im)
for i, j in zip(activation_pixels[0], activation_pixels[1]):
px = (j + 0.5) * cfg.pixel_size
py = (i + 0.5) * cfg.pixel_size
line_width, line_color = 1, 'red'
if y[i, j, 1] >= cfg.side_vertex_pixel_threshold:
if y[i, j, 2] < cfg.trunc_threshold:
line_width, line_color = 2, 'yellow'
elif y[i, j, 2] >= 1 - cfg.trunc_threshold:
line_width, line_color = 2, 'green'
draw.line([(px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),
(px + 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),
(px + 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),
(px - 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),
(px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size)],
width=line_width, fill=line_color)
im.save(img_path + '_act.jpg')
quad_draw = ImageDraw.Draw(quad_im)
txt_items = []
for score, geo, s in zip(quad_scores, quad_after_nms,
range(len(quad_scores))):
if np.amin(score) > 0:
quad_draw.line([tuple(geo[0]),
tuple(geo[1]),
tuple(geo[2]),
tuple(geo[3]),
tuple(geo[0])], width=2, fill='red')
if cfg.predict_cut_text_line:
cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array,
img_path, s)
                rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]  # (N, 4, 2) label coordinates
rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
txt_item = ','.join(map(str, rescaled_geo_list))
txt_items.append(txt_item + '\n')
elif not quiet:
print('quad invalid with vertex num less then 4.')
quad_im.save(img_path + '_predict.jpg')
if cfg.predict_write2txt and len(txt_items) > 0:
with open(img_path[:-4] + '.txt', 'w') as f_txt:
f_txt.writelines(txt_items)
def predict_txt(east_detect, img_path, txt_path, pixel_threshold, quiet=False):
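    # Same detection pipeline as predict(), but only writes the rescaled quad coordinates to txt_path.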
    img = Image.open(img_path)  # PIL image object, RGB by default
d_wight, d_height = resize_image(img, cfg.max_predict_img_size)
scale_ratio_w = d_wight / img.width
scale_ratio_h = d_height / img.height
transform = transforms.Compose([
transforms.Resize((d_wight, d_height), interpolation=2),
transforms.ToTensor()
])
x = transform(img)
    x = torch.unsqueeze(x, 0)  # add a batch dimension
y = east_detect(x)
    y = torch.squeeze(y, 0)  # remove the batch dimension
print(y.shape)
y = y.detach().numpy() # 7*64*64
if y.shape[0] == 7:
y = y.transpose((1, 2, 0)) # CHW->HWC
y[:, :, :3] = sigmoid(y[:, :, :3])
cond = np.greater_equal(y[:, :, 0], pixel_threshold)
activation_pixels = np.where(cond)
quad_scores, quad_after_nms = nms(y, activation_pixels)
txt_items = []
for score, geo in zip(quad_scores, quad_after_nms):
if np.amin(score) > 0:
rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]
rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()
txt_item = ','.join(map(str, rescaled_geo_list))
txt_items.append(txt_item + '\n')
elif not quiet:
print('quad invalid with vertex num less then 4.')
if cfg.predict_write2txt and len(txt_items) > 0:
with open(txt_path, 'w') as f_txt:
f_txt.writelines(txt_items)
if __name__ == '__main__':
if not os.path.exists('demo'):
os.makedirs('./demo', exist_ok=True)
img_path = cfg.img_path
threshold = float(cfg.predict_threshold)
pth_path = cfg.pth_path if cfg.pth_path else 'saved_model/3T736_latest.pth'
print(img_path, threshold)
east = advancedEAST()
state_dict = {k.replace('module.', ''): v for k, v in torch.load(pth_path, map_location='cpu').items()}
east.load_state_dict(state_dict)
predict(east, img_path, threshold)
|
normal
|
{
"blob_id": "48cef0377087d9245aad1fb759adf8ff07d2b66f",
"index": 4464,
"step-1": "<mask token>\n\n\ndef cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_path, s):\n geo /= [scale_ratio_w, scale_ratio_h]\n p_min = np.amin(geo, axis=0)\n p_max = np.amax(geo, axis=0)\n min_xy = p_min.astype(int)\n max_xy = p_max.astype(int) + 2\n sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()\n for m in range(min_xy[1], max_xy[1]):\n for n in range(min_xy[0], max_xy[0]):\n if not point_inside_of_quad(n, m, geo, p_min, p_max):\n sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255\n sub_im = Image.fromarray(sub_im_arr.astype('uint8')).convert('RGB')\n sub_im.save(img_path + '_subim%d.jpg' % s)\n\n\n<mask token>\n\n\ndef predict_txt(east_detect, img_path, txt_path, pixel_threshold, quiet=False):\n img = Image.open(img_path)\n d_wight, d_height = resize_image(img, cfg.max_predict_img_size)\n scale_ratio_w = d_wight / img.width\n scale_ratio_h = d_height / img.height\n transform = transforms.Compose([transforms.Resize((d_wight, d_height),\n interpolation=2), transforms.ToTensor()])\n x = transform(img)\n x = torch.unsqueeze(x, 0)\n y = east_detect(x)\n y = torch.squeeze(y, 0)\n print(y.shape)\n y = y.detach().numpy()\n if y.shape[0] == 7:\n y = y.transpose((1, 2, 0))\n y[:, :, :3] = sigmoid(y[:, :, :3])\n cond = np.greater_equal(y[:, :, 0], pixel_threshold)\n activation_pixels = np.where(cond)\n quad_scores, quad_after_nms = nms(y, activation_pixels)\n txt_items = []\n for score, geo in zip(quad_scores, quad_after_nms):\n if np.amin(score) > 0:\n rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]\n rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()\n txt_item = ','.join(map(str, rescaled_geo_list))\n txt_items.append(txt_item + '\\n')\n elif not quiet:\n print('quad invalid with vertex num less then 4.')\n if cfg.predict_write2txt and len(txt_items) > 0:\n with open(txt_path, 'w') as f_txt:\n f_txt.writelines(txt_items)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sigmoid(x):\n \"\"\"`y = 1 / (1 + exp(-x))`\"\"\"\n return 1 / (1 + np.exp(-x))\n\n\ndef cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_path, s):\n geo /= [scale_ratio_w, scale_ratio_h]\n p_min = np.amin(geo, axis=0)\n p_max = np.amax(geo, axis=0)\n min_xy = p_min.astype(int)\n max_xy = p_max.astype(int) + 2\n sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()\n for m in range(min_xy[1], max_xy[1]):\n for n in range(min_xy[0], max_xy[0]):\n if not point_inside_of_quad(n, m, geo, p_min, p_max):\n sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255\n sub_im = Image.fromarray(sub_im_arr.astype('uint8')).convert('RGB')\n sub_im.save(img_path + '_subim%d.jpg' % s)\n\n\ndef predict(east_detect, img_path, pixel_threshold, quiet=False):\n img = Image.open(img_path)\n d_wight, d_height = resize_image(img, cfg.max_predict_img_size)\n img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')\n x = transforms.ToTensor()(img)\n x = torch.unsqueeze(x, 0)\n y = east_detect(x)\n y = torch.squeeze(y, 0)\n print(y.shape)\n y = y.detach().numpy()\n if y.shape[0] == 7:\n y = y.transpose((1, 2, 0))\n y[:, :, :3] = sigmoid(y[:, :, :3])\n cond = np.greater_equal(y[:, :, 0], pixel_threshold)\n activation_pixels = np.where(cond)\n quad_scores, quad_after_nms = nms(y, activation_pixels)\n with Image.open(img_path) as im:\n im_array = np.array(im.convert('RGB'))\n d_wight, d_height = resize_image(im, cfg.max_predict_img_size)\n scale_ratio_w = d_wight / im.width\n scale_ratio_h = d_height / im.height\n im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')\n quad_im = im.copy()\n draw = ImageDraw.Draw(im)\n for i, j in zip(activation_pixels[0], activation_pixels[1]):\n px = (j + 0.5) * cfg.pixel_size\n py = (i + 0.5) * cfg.pixel_size\n line_width, line_color = 1, 'red'\n if y[i, j, 1] >= cfg.side_vertex_pixel_threshold:\n if y[i, j, 2] < cfg.trunc_threshold:\n line_width, line_color = 2, 'yellow'\n elif y[i, j, 2] >= 1 - cfg.trunc_threshold:\n line_width, line_color = 2, 'green'\n draw.line([(px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.\n pixel_size), (px + 0.5 * cfg.pixel_size, py - 0.5 * cfg.\n pixel_size), (px + 0.5 * cfg.pixel_size, py + 0.5 * cfg.\n pixel_size), (px - 0.5 * cfg.pixel_size, py + 0.5 * cfg.\n pixel_size), (px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.\n pixel_size)], width=line_width, fill=line_color)\n im.save(img_path + '_act.jpg')\n quad_draw = ImageDraw.Draw(quad_im)\n txt_items = []\n for score, geo, s in zip(quad_scores, quad_after_nms, range(len(\n quad_scores))):\n if np.amin(score) > 0:\n quad_draw.line([tuple(geo[0]), tuple(geo[1]), tuple(geo[2]),\n tuple(geo[3]), tuple(geo[0])], width=2, fill='red')\n if cfg.predict_cut_text_line:\n cut_text_line(geo, scale_ratio_w, scale_ratio_h,\n im_array, img_path, s)\n rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]\n rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()\n txt_item = ','.join(map(str, rescaled_geo_list))\n txt_items.append(txt_item + '\\n')\n elif not quiet:\n print('quad invalid with vertex num less then 4.')\n quad_im.save(img_path + '_predict.jpg')\n if cfg.predict_write2txt and len(txt_items) > 0:\n with open(img_path[:-4] + '.txt', 'w') as f_txt:\n f_txt.writelines(txt_items)\n\n\ndef predict_txt(east_detect, img_path, txt_path, pixel_threshold, quiet=False):\n img = Image.open(img_path)\n d_wight, d_height = resize_image(img, cfg.max_predict_img_size)\n scale_ratio_w = d_wight / img.width\n scale_ratio_h = d_height / img.height\n 
transform = transforms.Compose([transforms.Resize((d_wight, d_height),\n interpolation=2), transforms.ToTensor()])\n x = transform(img)\n x = torch.unsqueeze(x, 0)\n y = east_detect(x)\n y = torch.squeeze(y, 0)\n print(y.shape)\n y = y.detach().numpy()\n if y.shape[0] == 7:\n y = y.transpose((1, 2, 0))\n y[:, :, :3] = sigmoid(y[:, :, :3])\n cond = np.greater_equal(y[:, :, 0], pixel_threshold)\n activation_pixels = np.where(cond)\n quad_scores, quad_after_nms = nms(y, activation_pixels)\n txt_items = []\n for score, geo in zip(quad_scores, quad_after_nms):\n if np.amin(score) > 0:\n rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]\n rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()\n txt_item = ','.join(map(str, rescaled_geo_list))\n txt_items.append(txt_item + '\\n')\n elif not quiet:\n print('quad invalid with vertex num less then 4.')\n if cfg.predict_write2txt and len(txt_items) > 0:\n with open(txt_path, 'w') as f_txt:\n f_txt.writelines(txt_items)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sigmoid(x):\n \"\"\"`y = 1 / (1 + exp(-x))`\"\"\"\n return 1 / (1 + np.exp(-x))\n\n\ndef cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_path, s):\n geo /= [scale_ratio_w, scale_ratio_h]\n p_min = np.amin(geo, axis=0)\n p_max = np.amax(geo, axis=0)\n min_xy = p_min.astype(int)\n max_xy = p_max.astype(int) + 2\n sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()\n for m in range(min_xy[1], max_xy[1]):\n for n in range(min_xy[0], max_xy[0]):\n if not point_inside_of_quad(n, m, geo, p_min, p_max):\n sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255\n sub_im = Image.fromarray(sub_im_arr.astype('uint8')).convert('RGB')\n sub_im.save(img_path + '_subim%d.jpg' % s)\n\n\ndef predict(east_detect, img_path, pixel_threshold, quiet=False):\n img = Image.open(img_path)\n d_wight, d_height = resize_image(img, cfg.max_predict_img_size)\n img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')\n x = transforms.ToTensor()(img)\n x = torch.unsqueeze(x, 0)\n y = east_detect(x)\n y = torch.squeeze(y, 0)\n print(y.shape)\n y = y.detach().numpy()\n if y.shape[0] == 7:\n y = y.transpose((1, 2, 0))\n y[:, :, :3] = sigmoid(y[:, :, :3])\n cond = np.greater_equal(y[:, :, 0], pixel_threshold)\n activation_pixels = np.where(cond)\n quad_scores, quad_after_nms = nms(y, activation_pixels)\n with Image.open(img_path) as im:\n im_array = np.array(im.convert('RGB'))\n d_wight, d_height = resize_image(im, cfg.max_predict_img_size)\n scale_ratio_w = d_wight / im.width\n scale_ratio_h = d_height / im.height\n im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')\n quad_im = im.copy()\n draw = ImageDraw.Draw(im)\n for i, j in zip(activation_pixels[0], activation_pixels[1]):\n px = (j + 0.5) * cfg.pixel_size\n py = (i + 0.5) * cfg.pixel_size\n line_width, line_color = 1, 'red'\n if y[i, j, 1] >= cfg.side_vertex_pixel_threshold:\n if y[i, j, 2] < cfg.trunc_threshold:\n line_width, line_color = 2, 'yellow'\n elif y[i, j, 2] >= 1 - cfg.trunc_threshold:\n line_width, line_color = 2, 'green'\n draw.line([(px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.\n pixel_size), (px + 0.5 * cfg.pixel_size, py - 0.5 * cfg.\n pixel_size), (px + 0.5 * cfg.pixel_size, py + 0.5 * cfg.\n pixel_size), (px - 0.5 * cfg.pixel_size, py + 0.5 * cfg.\n pixel_size), (px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.\n pixel_size)], width=line_width, fill=line_color)\n im.save(img_path + '_act.jpg')\n quad_draw = ImageDraw.Draw(quad_im)\n txt_items = []\n for score, geo, s in zip(quad_scores, quad_after_nms, range(len(\n quad_scores))):\n if np.amin(score) > 0:\n quad_draw.line([tuple(geo[0]), tuple(geo[1]), tuple(geo[2]),\n tuple(geo[3]), tuple(geo[0])], width=2, fill='red')\n if cfg.predict_cut_text_line:\n cut_text_line(geo, scale_ratio_w, scale_ratio_h,\n im_array, img_path, s)\n rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]\n rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()\n txt_item = ','.join(map(str, rescaled_geo_list))\n txt_items.append(txt_item + '\\n')\n elif not quiet:\n print('quad invalid with vertex num less then 4.')\n quad_im.save(img_path + '_predict.jpg')\n if cfg.predict_write2txt and len(txt_items) > 0:\n with open(img_path[:-4] + '.txt', 'w') as f_txt:\n f_txt.writelines(txt_items)\n\n\ndef predict_txt(east_detect, img_path, txt_path, pixel_threshold, quiet=False):\n img = Image.open(img_path)\n d_wight, d_height = resize_image(img, cfg.max_predict_img_size)\n scale_ratio_w = d_wight / img.width\n scale_ratio_h = d_height / img.height\n 
transform = transforms.Compose([transforms.Resize((d_wight, d_height),\n interpolation=2), transforms.ToTensor()])\n x = transform(img)\n x = torch.unsqueeze(x, 0)\n y = east_detect(x)\n y = torch.squeeze(y, 0)\n print(y.shape)\n y = y.detach().numpy()\n if y.shape[0] == 7:\n y = y.transpose((1, 2, 0))\n y[:, :, :3] = sigmoid(y[:, :, :3])\n cond = np.greater_equal(y[:, :, 0], pixel_threshold)\n activation_pixels = np.where(cond)\n quad_scores, quad_after_nms = nms(y, activation_pixels)\n txt_items = []\n for score, geo in zip(quad_scores, quad_after_nms):\n if np.amin(score) > 0:\n rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]\n rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()\n txt_item = ','.join(map(str, rescaled_geo_list))\n txt_items.append(txt_item + '\\n')\n elif not quiet:\n print('quad invalid with vertex num less then 4.')\n if cfg.predict_write2txt and len(txt_items) > 0:\n with open(txt_path, 'w') as f_txt:\n f_txt.writelines(txt_items)\n\n\nif __name__ == '__main__':\n if not os.path.exists('demo'):\n os.makedirs('./demo', exist_ok=True)\n img_path = cfg.img_path\n threshold = float(cfg.predict_threshold)\n pth_path = cfg.pth_path if cfg.pth_path else 'saved_model/3T736_latest.pth'\n print(img_path, threshold)\n east = advancedEAST()\n state_dict = {k.replace('module.', ''): v for k, v in torch.load(\n pth_path, map_location='cpu').items()}\n east.load_state_dict(state_dict)\n predict(east, img_path, threshold)\n",
"step-4": "import os\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport torch\nfrom torchvision import transforms\nimport cfg\nfrom label import point_inside_of_quad\nfrom model_VGG import advancedEAST\nfrom preprocess import resize_image\nfrom nms import nms\n\n\ndef sigmoid(x):\n \"\"\"`y = 1 / (1 + exp(-x))`\"\"\"\n return 1 / (1 + np.exp(-x))\n\n\ndef cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_path, s):\n geo /= [scale_ratio_w, scale_ratio_h]\n p_min = np.amin(geo, axis=0)\n p_max = np.amax(geo, axis=0)\n min_xy = p_min.astype(int)\n max_xy = p_max.astype(int) + 2\n sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()\n for m in range(min_xy[1], max_xy[1]):\n for n in range(min_xy[0], max_xy[0]):\n if not point_inside_of_quad(n, m, geo, p_min, p_max):\n sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255\n sub_im = Image.fromarray(sub_im_arr.astype('uint8')).convert('RGB')\n sub_im.save(img_path + '_subim%d.jpg' % s)\n\n\ndef predict(east_detect, img_path, pixel_threshold, quiet=False):\n img = Image.open(img_path)\n d_wight, d_height = resize_image(img, cfg.max_predict_img_size)\n img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')\n x = transforms.ToTensor()(img)\n x = torch.unsqueeze(x, 0)\n y = east_detect(x)\n y = torch.squeeze(y, 0)\n print(y.shape)\n y = y.detach().numpy()\n if y.shape[0] == 7:\n y = y.transpose((1, 2, 0))\n y[:, :, :3] = sigmoid(y[:, :, :3])\n cond = np.greater_equal(y[:, :, 0], pixel_threshold)\n activation_pixels = np.where(cond)\n quad_scores, quad_after_nms = nms(y, activation_pixels)\n with Image.open(img_path) as im:\n im_array = np.array(im.convert('RGB'))\n d_wight, d_height = resize_image(im, cfg.max_predict_img_size)\n scale_ratio_w = d_wight / im.width\n scale_ratio_h = d_height / im.height\n im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')\n quad_im = im.copy()\n draw = ImageDraw.Draw(im)\n for i, j in zip(activation_pixels[0], activation_pixels[1]):\n px = (j + 0.5) * cfg.pixel_size\n py = (i + 0.5) * cfg.pixel_size\n line_width, line_color = 1, 'red'\n if y[i, j, 1] >= cfg.side_vertex_pixel_threshold:\n if y[i, j, 2] < cfg.trunc_threshold:\n line_width, line_color = 2, 'yellow'\n elif y[i, j, 2] >= 1 - cfg.trunc_threshold:\n line_width, line_color = 2, 'green'\n draw.line([(px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.\n pixel_size), (px + 0.5 * cfg.pixel_size, py - 0.5 * cfg.\n pixel_size), (px + 0.5 * cfg.pixel_size, py + 0.5 * cfg.\n pixel_size), (px - 0.5 * cfg.pixel_size, py + 0.5 * cfg.\n pixel_size), (px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.\n pixel_size)], width=line_width, fill=line_color)\n im.save(img_path + '_act.jpg')\n quad_draw = ImageDraw.Draw(quad_im)\n txt_items = []\n for score, geo, s in zip(quad_scores, quad_after_nms, range(len(\n quad_scores))):\n if np.amin(score) > 0:\n quad_draw.line([tuple(geo[0]), tuple(geo[1]), tuple(geo[2]),\n tuple(geo[3]), tuple(geo[0])], width=2, fill='red')\n if cfg.predict_cut_text_line:\n cut_text_line(geo, scale_ratio_w, scale_ratio_h,\n im_array, img_path, s)\n rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]\n rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()\n txt_item = ','.join(map(str, rescaled_geo_list))\n txt_items.append(txt_item + '\\n')\n elif not quiet:\n print('quad invalid with vertex num less then 4.')\n quad_im.save(img_path + '_predict.jpg')\n if cfg.predict_write2txt and len(txt_items) > 0:\n with open(img_path[:-4] + '.txt', 'w') as f_txt:\n f_txt.writelines(txt_items)\n\n\ndef 
predict_txt(east_detect, img_path, txt_path, pixel_threshold, quiet=False):\n img = Image.open(img_path)\n d_wight, d_height = resize_image(img, cfg.max_predict_img_size)\n scale_ratio_w = d_wight / img.width\n scale_ratio_h = d_height / img.height\n transform = transforms.Compose([transforms.Resize((d_wight, d_height),\n interpolation=2), transforms.ToTensor()])\n x = transform(img)\n x = torch.unsqueeze(x, 0)\n y = east_detect(x)\n y = torch.squeeze(y, 0)\n print(y.shape)\n y = y.detach().numpy()\n if y.shape[0] == 7:\n y = y.transpose((1, 2, 0))\n y[:, :, :3] = sigmoid(y[:, :, :3])\n cond = np.greater_equal(y[:, :, 0], pixel_threshold)\n activation_pixels = np.where(cond)\n quad_scores, quad_after_nms = nms(y, activation_pixels)\n txt_items = []\n for score, geo in zip(quad_scores, quad_after_nms):\n if np.amin(score) > 0:\n rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]\n rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()\n txt_item = ','.join(map(str, rescaled_geo_list))\n txt_items.append(txt_item + '\\n')\n elif not quiet:\n print('quad invalid with vertex num less then 4.')\n if cfg.predict_write2txt and len(txt_items) > 0:\n with open(txt_path, 'w') as f_txt:\n f_txt.writelines(txt_items)\n\n\nif __name__ == '__main__':\n if not os.path.exists('demo'):\n os.makedirs('./demo', exist_ok=True)\n img_path = cfg.img_path\n threshold = float(cfg.predict_threshold)\n pth_path = cfg.pth_path if cfg.pth_path else 'saved_model/3T736_latest.pth'\n print(img_path, threshold)\n east = advancedEAST()\n state_dict = {k.replace('module.', ''): v for k, v in torch.load(\n pth_path, map_location='cpu').items()}\n east.load_state_dict(state_dict)\n predict(east, img_path, threshold)\n",
"step-5": "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport numpy as np\nfrom PIL import Image, ImageDraw\nimport torch\nfrom torchvision import transforms\n\nimport cfg\nfrom label import point_inside_of_quad\nfrom model_VGG import advancedEAST\nfrom preprocess import resize_image\nfrom nms import nms\n\n\ndef sigmoid(x):\n \"\"\"`y = 1 / (1 + exp(-x))`\"\"\"\n return 1 / (1 + np.exp(-x))\n\n\ndef cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array, img_path, s):\n geo /= [scale_ratio_w, scale_ratio_h]\n p_min = np.amin(geo, axis=0)\n p_max = np.amax(geo, axis=0)\n min_xy = p_min.astype(int)\n max_xy = p_max.astype(int) + 2\n sub_im_arr = im_array[min_xy[1]:max_xy[1], min_xy[0]:max_xy[0], :].copy()\n for m in range(min_xy[1], max_xy[1]):\n for n in range(min_xy[0], max_xy[0]):\n if not point_inside_of_quad(n, m, geo, p_min, p_max):\n sub_im_arr[m - min_xy[1], n - min_xy[0], :] = 255\n sub_im = Image.fromarray(sub_im_arr.astype('uint8')).convert('RGB')\n sub_im.save(img_path + '_subim%d.jpg' % s)\n\n\ndef predict(east_detect, img_path, pixel_threshold, quiet=False):\n img = Image.open(img_path) # 为PIL图像对象,默认RGB\n d_wight, d_height = resize_image(img, cfg.max_predict_img_size)\n img = img.resize((d_wight, d_height), Image.NEAREST).convert('RGB')\n x = transforms.ToTensor()(img)\n x = torch.unsqueeze(x, 0) # 增加一个维度\n y = east_detect(x)\n y = torch.squeeze(y, 0) # 减少一个维度\n print(y.shape)\n y = y.detach().numpy() # 7*64*64\n if y.shape[0] == 7:\n y = y.transpose((1, 2, 0)) # CHW->HWC\n y[:, :, :3] = sigmoid(y[:, :, :3])\n cond = np.greater_equal(y[:, :, 0], pixel_threshold)\n activation_pixels = np.where(cond)\n quad_scores, quad_after_nms = nms(y, activation_pixels)\n with Image.open(img_path) as im:\n im_array = np.array(im.convert('RGB')) # 图片转为numpy数组\n d_wight, d_height = resize_image(im, cfg.max_predict_img_size)\n scale_ratio_w = d_wight / im.width\n scale_ratio_h = d_height / im.height\n im = im.resize((d_wight, d_height), Image.NEAREST).convert('RGB')\n quad_im = im.copy()\n draw = ImageDraw.Draw(im)\n for i, j in zip(activation_pixels[0], activation_pixels[1]):\n px = (j + 0.5) * cfg.pixel_size\n py = (i + 0.5) * cfg.pixel_size\n line_width, line_color = 1, 'red'\n if y[i, j, 1] >= cfg.side_vertex_pixel_threshold:\n if y[i, j, 2] < cfg.trunc_threshold:\n line_width, line_color = 2, 'yellow'\n elif y[i, j, 2] >= 1 - cfg.trunc_threshold:\n line_width, line_color = 2, 'green'\n draw.line([(px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),\n (px + 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size),\n (px + 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),\n (px - 0.5 * cfg.pixel_size, py + 0.5 * cfg.pixel_size),\n (px - 0.5 * cfg.pixel_size, py - 0.5 * cfg.pixel_size)],\n width=line_width, fill=line_color)\n im.save(img_path + '_act.jpg')\n quad_draw = ImageDraw.Draw(quad_im)\n txt_items = []\n for score, geo, s in zip(quad_scores, quad_after_nms,\n range(len(quad_scores))):\n if np.amin(score) > 0:\n 
quad_draw.line([tuple(geo[0]),\n tuple(geo[1]),\n tuple(geo[2]),\n tuple(geo[3]),\n tuple(geo[0])], width=2, fill='red')\n if cfg.predict_cut_text_line:\n cut_text_line(geo, scale_ratio_w, scale_ratio_h, im_array,\n img_path, s)\n rescaled_geo = geo / [scale_ratio_w, scale_ratio_h] # (N, 4, 2)标签坐标\n rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()\n txt_item = ','.join(map(str, rescaled_geo_list))\n txt_items.append(txt_item + '\\n')\n elif not quiet:\n print('quad invalid with vertex num less then 4.')\n quad_im.save(img_path + '_predict.jpg')\n if cfg.predict_write2txt and len(txt_items) > 0:\n with open(img_path[:-4] + '.txt', 'w') as f_txt:\n f_txt.writelines(txt_items)\n\n\ndef predict_txt(east_detect, img_path, txt_path, pixel_threshold, quiet=False):\n img = Image.open(img_path) # 为PIL图像对象,默认RGB\n d_wight, d_height = resize_image(img, cfg.max_predict_img_size)\n scale_ratio_w = d_wight / img.width\n scale_ratio_h = d_height / img.height\n transform = transforms.Compose([\n transforms.Resize((d_wight, d_height), interpolation=2),\n transforms.ToTensor()\n ])\n x = transform(img)\n x = torch.unsqueeze(x, 0) # 增加一个维度\n y = east_detect(x)\n y = torch.squeeze(y, 0) # 减少一个维度\n print(y.shape)\n y = y.detach().numpy() # 7*64*64\n if y.shape[0] == 7:\n y = y.transpose((1, 2, 0)) # CHW->HWC\n y[:, :, :3] = sigmoid(y[:, :, :3])\n cond = np.greater_equal(y[:, :, 0], pixel_threshold)\n activation_pixels = np.where(cond)\n quad_scores, quad_after_nms = nms(y, activation_pixels)\n\n txt_items = []\n for score, geo in zip(quad_scores, quad_after_nms):\n if np.amin(score) > 0:\n rescaled_geo = geo / [scale_ratio_w, scale_ratio_h]\n rescaled_geo_list = np.reshape(rescaled_geo, (8,)).tolist()\n txt_item = ','.join(map(str, rescaled_geo_list))\n txt_items.append(txt_item + '\\n')\n elif not quiet:\n print('quad invalid with vertex num less then 4.')\n if cfg.predict_write2txt and len(txt_items) > 0:\n with open(txt_path, 'w') as f_txt:\n f_txt.writelines(txt_items)\n\n\nif __name__ == '__main__':\n if not os.path.exists('demo'):\n os.makedirs('./demo', exist_ok=True)\n img_path = cfg.img_path\n threshold = float(cfg.predict_threshold)\n pth_path = cfg.pth_path if cfg.pth_path else 'saved_model/3T736_latest.pth'\n print(img_path, threshold)\n\n east = advancedEAST()\n state_dict = {k.replace('module.', ''): v for k, v in torch.load(pth_path, map_location='cpu').items()}\n east.load_state_dict(state_dict)\n predict(east, img_path, threshold)\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
'''
Created on Sep 4, 2014
@author: Jay <[email protected]>
'''
import socket
def ip_validation(ip):
'''
check if the ip address is in a valid format.
'''
try:
socket.inet_aton(ip)
return True
except socket.error:
return False
def connection_validation(ip, port):
'''
check if the ip:port can be connected using socket.
@param port: the port is an integer.
'''
if not ip_validation(ip):
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2)
result = sock.connect_ex((ip, port))
if result == 0:
return True
else:
return False
if __name__ == '__main__':
ip = '192.168.213.11'
port = 90
print ip_validation(ip)
print connection_validation(ip, port)
|
normal
|
{
"blob_id": "2bc9c0711831d9ed9009d0f9600153709bbcd6da",
"index": 9178,
"step-1": "'''\nCreated on Sep 4, 2014\n\n@author: Jay <[email protected]>\n'''\n\nimport socket\n\n\ndef ip_validation(ip):\n '''\n check if the ip address is in a valid format.\n '''\n try:\n socket.inet_aton(ip)\n return True\n except socket.error:\n return False\n\n\ndef connection_validation(ip, port):\n '''\n check if the ip:port can be connected using socket.\n @param port: the port is an integer.\n '''\n if not ip_validation(ip):\n return False\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(2)\n result = sock.connect_ex((ip, port))\n if result == 0:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n ip = '192.168.213.11'\n port = 90\n print ip_validation(ip)\n print connection_validation(ip, port)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from huobi import RequestClient
from huobi.constant.test import *
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
obj_list = request_client.get_cross_margin_loan_orders()
if len(obj_list):
for obj in obj_list:
obj.print_object()
print()
|
normal
|
{
"blob_id": "c65969bba72142f4a328f978d78e0235cd56e393",
"index": 8618,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(obj_list):\n for obj in obj_list:\n obj.print_object()\n print()\n",
"step-3": "<mask token>\nrequest_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)\nobj_list = request_client.get_cross_margin_loan_orders()\nif len(obj_list):\n for obj in obj_list:\n obj.print_object()\n print()\n",
"step-4": "from huobi import RequestClient\nfrom huobi.constant.test import *\nrequest_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)\nobj_list = request_client.get_cross_margin_loan_orders()\nif len(obj_list):\n for obj in obj_list:\n obj.print_object()\n print()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
import pygame
import pygame.mixer as mixer
def pre_init():
mixer.pre_init(22050, -16, 2, 2048)
def init():
mixer.init()
pygame.mixer.set_num_channels(16)
def deinit():
mixer.quit()
class Music (object):
our_music_volume = 0.8
our_current_music = None
def __init__( self, filename = None ):
self.sound = None
self.channel = None
if filename is not None:
self.load( filename )
def load( self, filename ):
self.sound = mixer.Sound( filename )
def play( self, loop = -1 ):
self.sound.set_volume( Music.our_music_volume )
self.channel = self.sound.play( loop )
Music.our_current_music = self.sound
def stop( self ):
self.sound.stop()
def fadeout( self, millisec ):
self.sound.fadeout( millisec )
def is_playing( self ):
return self.channel is not None and self.channel.get_sound() is self.sound
@staticmethod
def set_global_volume( volume ):
assert volume >= 0.0
assert volume <= 1.0
Music.our_music_volume = volume
if Music.our_current_music is not None:
Music.our_current_music.set_volume( volume )
@staticmethod
def get_global_volume():
return Music.our_music_volume
class Sound (object):
our_sound_volume = 0.8
def __init__( self, filename = None ):
self.sound = None
self.channel = None
if filename is not None:
self.load( filename )
def load( self, filename ):
self.sound = mixer.Sound( filename )
def play( self, loop = 0 ):
"""for infiniteloop, set loop to -1"""
self.sound.set_volume( Sound.our_sound_volume )
self.channel = self.sound.play( loop )
def stop( self ):
self.sound.stop()
def fadeout( self, millisec ):
self.sound.fadeout( millisec )
def is_playing( self ):
return self.channel is not None and self.channel.get_sound() is self.sound
@staticmethod
def set_global_volume( volume ):
assert volume >= 0.0
assert volume <= 1.0
Sound.our_sound_volume = volume
@staticmethod
def get_global_volume():
return Sound.our_sound_volume
|
normal
|
{
"blob_id": "2caea9e7bbef99b19ba917995513413385c7abdf",
"index": 9808,
"step-1": "<mask token>\n\n\nclass Music(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def get_global_volume():\n return Music.our_music_volume\n\n\nclass Sound(object):\n our_sound_volume = 0.8\n\n def __init__(self, filename=None):\n self.sound = None\n self.channel = None\n if filename is not None:\n self.load(filename)\n\n def load(self, filename):\n self.sound = mixer.Sound(filename)\n\n def play(self, loop=0):\n \"\"\"for infiniteloop, set loop to -1\"\"\"\n self.sound.set_volume(Sound.our_sound_volume)\n self.channel = self.sound.play(loop)\n\n def stop(self):\n self.sound.stop()\n\n def fadeout(self, millisec):\n self.sound.fadeout(millisec)\n\n def is_playing(self):\n return self.channel is not None and self.channel.get_sound(\n ) is self.sound\n\n @staticmethod\n def set_global_volume(volume):\n assert volume >= 0.0\n assert volume <= 1.0\n Sound.our_sound_volume = volume\n\n @staticmethod\n def get_global_volume():\n return Sound.our_sound_volume\n",
"step-2": "<mask token>\n\n\nclass Music(object):\n <mask token>\n <mask token>\n\n def __init__(self, filename=None):\n self.sound = None\n self.channel = None\n if filename is not None:\n self.load(filename)\n\n def load(self, filename):\n self.sound = mixer.Sound(filename)\n\n def play(self, loop=-1):\n self.sound.set_volume(Music.our_music_volume)\n self.channel = self.sound.play(loop)\n Music.our_current_music = self.sound\n\n def stop(self):\n self.sound.stop()\n\n def fadeout(self, millisec):\n self.sound.fadeout(millisec)\n <mask token>\n\n @staticmethod\n def set_global_volume(volume):\n assert volume >= 0.0\n assert volume <= 1.0\n Music.our_music_volume = volume\n if Music.our_current_music is not None:\n Music.our_current_music.set_volume(volume)\n\n @staticmethod\n def get_global_volume():\n return Music.our_music_volume\n\n\nclass Sound(object):\n our_sound_volume = 0.8\n\n def __init__(self, filename=None):\n self.sound = None\n self.channel = None\n if filename is not None:\n self.load(filename)\n\n def load(self, filename):\n self.sound = mixer.Sound(filename)\n\n def play(self, loop=0):\n \"\"\"for infiniteloop, set loop to -1\"\"\"\n self.sound.set_volume(Sound.our_sound_volume)\n self.channel = self.sound.play(loop)\n\n def stop(self):\n self.sound.stop()\n\n def fadeout(self, millisec):\n self.sound.fadeout(millisec)\n\n def is_playing(self):\n return self.channel is not None and self.channel.get_sound(\n ) is self.sound\n\n @staticmethod\n def set_global_volume(volume):\n assert volume >= 0.0\n assert volume <= 1.0\n Sound.our_sound_volume = volume\n\n @staticmethod\n def get_global_volume():\n return Sound.our_sound_volume\n",
"step-3": "<mask token>\n\n\ndef deinit():\n mixer.quit()\n\n\nclass Music(object):\n our_music_volume = 0.8\n our_current_music = None\n\n def __init__(self, filename=None):\n self.sound = None\n self.channel = None\n if filename is not None:\n self.load(filename)\n\n def load(self, filename):\n self.sound = mixer.Sound(filename)\n\n def play(self, loop=-1):\n self.sound.set_volume(Music.our_music_volume)\n self.channel = self.sound.play(loop)\n Music.our_current_music = self.sound\n\n def stop(self):\n self.sound.stop()\n\n def fadeout(self, millisec):\n self.sound.fadeout(millisec)\n\n def is_playing(self):\n return self.channel is not None and self.channel.get_sound(\n ) is self.sound\n\n @staticmethod\n def set_global_volume(volume):\n assert volume >= 0.0\n assert volume <= 1.0\n Music.our_music_volume = volume\n if Music.our_current_music is not None:\n Music.our_current_music.set_volume(volume)\n\n @staticmethod\n def get_global_volume():\n return Music.our_music_volume\n\n\nclass Sound(object):\n our_sound_volume = 0.8\n\n def __init__(self, filename=None):\n self.sound = None\n self.channel = None\n if filename is not None:\n self.load(filename)\n\n def load(self, filename):\n self.sound = mixer.Sound(filename)\n\n def play(self, loop=0):\n \"\"\"for infiniteloop, set loop to -1\"\"\"\n self.sound.set_volume(Sound.our_sound_volume)\n self.channel = self.sound.play(loop)\n\n def stop(self):\n self.sound.stop()\n\n def fadeout(self, millisec):\n self.sound.fadeout(millisec)\n\n def is_playing(self):\n return self.channel is not None and self.channel.get_sound(\n ) is self.sound\n\n @staticmethod\n def set_global_volume(volume):\n assert volume >= 0.0\n assert volume <= 1.0\n Sound.our_sound_volume = volume\n\n @staticmethod\n def get_global_volume():\n return Sound.our_sound_volume\n",
"step-4": "import pygame\nimport pygame.mixer as mixer\n\n\ndef pre_init():\n mixer.pre_init(22050, -16, 2, 2048)\n\n\ndef init():\n mixer.init()\n pygame.mixer.set_num_channels(16)\n\n\ndef deinit():\n mixer.quit()\n\n\nclass Music(object):\n our_music_volume = 0.8\n our_current_music = None\n\n def __init__(self, filename=None):\n self.sound = None\n self.channel = None\n if filename is not None:\n self.load(filename)\n\n def load(self, filename):\n self.sound = mixer.Sound(filename)\n\n def play(self, loop=-1):\n self.sound.set_volume(Music.our_music_volume)\n self.channel = self.sound.play(loop)\n Music.our_current_music = self.sound\n\n def stop(self):\n self.sound.stop()\n\n def fadeout(self, millisec):\n self.sound.fadeout(millisec)\n\n def is_playing(self):\n return self.channel is not None and self.channel.get_sound(\n ) is self.sound\n\n @staticmethod\n def set_global_volume(volume):\n assert volume >= 0.0\n assert volume <= 1.0\n Music.our_music_volume = volume\n if Music.our_current_music is not None:\n Music.our_current_music.set_volume(volume)\n\n @staticmethod\n def get_global_volume():\n return Music.our_music_volume\n\n\nclass Sound(object):\n our_sound_volume = 0.8\n\n def __init__(self, filename=None):\n self.sound = None\n self.channel = None\n if filename is not None:\n self.load(filename)\n\n def load(self, filename):\n self.sound = mixer.Sound(filename)\n\n def play(self, loop=0):\n \"\"\"for infiniteloop, set loop to -1\"\"\"\n self.sound.set_volume(Sound.our_sound_volume)\n self.channel = self.sound.play(loop)\n\n def stop(self):\n self.sound.stop()\n\n def fadeout(self, millisec):\n self.sound.fadeout(millisec)\n\n def is_playing(self):\n return self.channel is not None and self.channel.get_sound(\n ) is self.sound\n\n @staticmethod\n def set_global_volume(volume):\n assert volume >= 0.0\n assert volume <= 1.0\n Sound.our_sound_volume = volume\n\n @staticmethod\n def get_global_volume():\n return Sound.our_sound_volume\n",
"step-5": "#!/usr/bin/env python\n\nimport pygame\nimport pygame.mixer as mixer\n\ndef pre_init():\n mixer.pre_init(22050, -16, 2, 2048)\n\ndef init():\n mixer.init()\n pygame.mixer.set_num_channels(16)\n\ndef deinit():\n mixer.quit()\n\n\nclass Music (object):\n our_music_volume = 0.8\n our_current_music = None\n \n def __init__( self, filename = None ):\n self.sound = None\n self.channel = None\n if filename is not None:\n self.load( filename )\n\n def load( self, filename ):\n self.sound = mixer.Sound( filename )\n\n def play( self, loop = -1 ):\n self.sound.set_volume( Music.our_music_volume )\n self.channel = self.sound.play( loop )\n Music.our_current_music = self.sound\n \n def stop( self ):\n self.sound.stop()\n\n def fadeout( self, millisec ):\n self.sound.fadeout( millisec )\n\n def is_playing( self ):\n return self.channel is not None and self.channel.get_sound() is self.sound\n\n @staticmethod\n def set_global_volume( volume ):\n assert volume >= 0.0\n assert volume <= 1.0\n\n Music.our_music_volume = volume\n\n if Music.our_current_music is not None:\n Music.our_current_music.set_volume( volume )\n\n @staticmethod\n def get_global_volume():\n return Music.our_music_volume\n \n\nclass Sound (object):\n our_sound_volume = 0.8\n \n def __init__( self, filename = None ):\n self.sound = None\n self.channel = None\n if filename is not None:\n self.load( filename )\n\n def load( self, filename ):\n self.sound = mixer.Sound( filename )\n\n def play( self, loop = 0 ):\n \"\"\"for infiniteloop, set loop to -1\"\"\"\n self.sound.set_volume( Sound.our_sound_volume )\n self.channel = self.sound.play( loop )\n \n def stop( self ):\n self.sound.stop()\n\n def fadeout( self, millisec ):\n self.sound.fadeout( millisec )\n\n def is_playing( self ):\n return self.channel is not None and self.channel.get_sound() is self.sound\n\n @staticmethod\n def set_global_volume( volume ):\n assert volume >= 0.0\n assert volume <= 1.0\n\n Sound.our_sound_volume = volume\n\n @staticmethod\n def get_global_volume():\n return Sound.our_sound_volume\n \n",
"step-ids": [
12,
18,
21,
24,
25
]
}
|
[
12,
18,
21,
24,
25
] |
"""Visit module to add odoo checks
"""
import os
import re
import astroid
import isort
from pylint.checkers import utils
from six import string_types
from .. import misc, settings
ODOO_MSGS = {
# C->convention R->refactor W->warning E->error F->fatal
# Visit odoo module with settings.BASE_OMODULE_ID
'C%d02' % settings.BASE_OMODULE_ID: (
'Missing ./README.rst file. Template here: %s',
'missing-readme',
settings.DESC_DFLT
),
'E%d01' % settings.BASE_OMODULE_ID: (
'%s %s',
'rst-syntax-error',
settings.DESC_DFLT
),
'E%d02' % settings.BASE_OMODULE_ID: (
'%s error: %s',
'xml-syntax-error',
settings.DESC_DFLT
),
'W%d01' % settings.BASE_OMODULE_ID: (
'%s Dangerous filter without explicit `user_id` in xml_id %s',
'dangerous-filter-wo-user',
settings.DESC_DFLT
),
'W%d02' % settings.BASE_OMODULE_ID: (
'%s Duplicate xml record id "%s" in %s',
'duplicate-xml-record-id',
settings.DESC_DFLT
),
'W%d03' % settings.BASE_OMODULE_ID: (
'%s',
'javascript-lint',
settings.DESC_DFLT
),
'W%d04' % settings.BASE_OMODULE_ID: (
'%s Deprecated <openerp> xml node',
'deprecated-openerp-xml-node',
settings.DESC_DFLT
),
'W%d05' % settings.BASE_OMODULE_ID: (
'%s record res.users without '
'context="{\'no_reset_password\': True}"',
'create-user-wo-reset-password',
settings.DESC_DFLT
),
'W%d06' % settings.BASE_OMODULE_ID: (
'%s Duplicate id "%s"',
'duplicate-id-csv',
settings.DESC_DFLT
),
'W%d07' % settings.BASE_OMODULE_ID: (
'%s Duplicate xml field "%s" in lines %s',
'duplicate-xml-fields',
settings.DESC_DFLT
),
'W%d08' % settings.BASE_OMODULE_ID: (
'%s Missing newline',
'missing-newline-extrafiles',
settings.DESC_DFLT
),
'W%d09' % settings.BASE_OMODULE_ID: (
        '%s Redundant module name reference in xml_ids "%s".',
'redundant-modulename-xml',
settings.DESC_DFLT
),
'W%d10' % settings.BASE_OMODULE_ID: (
        '%s Uses wrong tab indentation instead of four spaces',
'wrong-tabs-instead-of-spaces',
settings.DESC_DFLT
),
'R%d80' % settings.BASE_OMODULE_ID: (
'Consider merging classes inherited to "%s" from %s.',
'consider-merging-classes-inherited',
settings.DESC_DFLT
),
'W%d50' % settings.BASE_OMODULE_ID: (
'Same Odoo module absolute import. You should use '
'relative import with "." '
'instead of "openerp.addons.%s"',
'odoo-addons-relative-import',
settings.DESC_DFLT
),
'W%d40' % settings.BASE_OMODULE_ID: (
'%s Dangerous use of "replace" from view '
'with priority %s < %s. '
'Increase priority or don\'t use "replace". '
'For more information see https://odoo-development.readthedocs.io/en/latest/dev/xml/inherit.html#collisions-and-priority ',
'dangerous-view-replace-wo-priority',
settings.DESC_DFLT
),
'W%d30' % settings.BASE_OMODULE_ID: (
'%s not used from manifest',
'file-not-used',
settings.DESC_DFLT
),
'W%d35' % settings.BASE_OMODULE_ID: (
'External dependency "%s" without ImportError. More info: '
'https://odoo-development.readthedocs.io/en/latest/dev/py/external-imports.html'
'#external-dependencies',
'missing-import-error',
settings.DESC_DFLT
),
'W%d36' % settings.BASE_OMODULE_ID: (
'Missing external dependency "%s" from manifest. More info: '
'https://github.com/OCA/odoo-community.org/blob/master/website/'
'Contribution/CONTRIBUTING.rst'
'#external-dependencies',
'missing-manifest-dependency',
settings.DESC_DFLT
),
'W%d38' % settings.BASE_OMODULE_ID: (
        'pass into except block. '
        'If you really need to use the pass, consider logging that exception',
'except-pass',
settings.DESC_DFLT
),
'W%d37' % settings.BASE_OMODULE_ID: (
'%s The xml attribute is missing the translation="off" tag %s',
'xml-attribute-translatable',
settings.DESC_DFLT
),
'W%d42' % settings.BASE_OMODULE_ID: (
'%s Deprecated <tree> xml attribute "%s"',
'xml-deprecated-tree-attribute',
settings.DESC_DFLT
),
'W%d43' % settings.BASE_OMODULE_ID: (
'%s Deprecated QWeb directive "%s". Use "t-options" instead',
'xml-deprecated-qweb-directive',
settings.DESC_DFLT
),
'W%d39' % settings.BASE_OMODULE_ID: (
'%s Use <odoo> instead of <odoo><data> or use <odoo noupdate="1">'
'instead of <odoo><data noupdate="1">',
'deprecated-data-xml-node',
settings.DESC_DFLT
),
'W%d44' % settings.BASE_OMODULE_ID: (
        '%s The resource in src/href contains an invalid character',
'character-not-valid-in-resource-link',
settings.DESC_DFLT
),
}
DFTL_README_TMPL_URL = 'https://github.com/OCA/maintainer-tools' + \
'/blob/master/template/module/README.rst'
DFTL_README_FILES = ['README.rst', 'README.md', 'README.txt']
DFTL_MIN_PRIORITY = 99
# Files supported from manifest to convert
# Extracted from openerp/tools/convert.py:def convert_file
DFLT_EXTFILES_CONVERT = ['csv', 'sql', 'xml', 'yml']
DFLT_EXTFILES_TO_LINT = DFLT_EXTFILES_CONVERT + [
'po', 'js', 'mako', 'rst', 'md', 'markdown']
DFLT_IMPORT_NAME_WHITELIST = [
# self-odoo
'odoo', 'openerp',
# packages for unit tests only
'requests_mock',
# Known external packages of odoo
'PIL', 'anybox.testing.openerp', 'argparse', 'babel',
'dateutil', 'decorator', 'docutils', 'faces', 'feedparser',
'gdata', 'gevent', 'greenlet', 'jcconv', 'jinja2',
'ldap', 'lxml', 'mako', 'markupsafe', 'mock', 'odf',
'ofxparse', 'openid', 'passlib', 'pkg_resources',
'psutil', 'psycogreen', 'psycopg2', 'pyPdf', 'pychart',
'pydot', 'pyparsing', 'pytz', 'qrcode', 'reportlab',
'requests', 'serial', 'simplejson', 'six', 'suds',
'unittest2', 'usb', 'vatnumber', 'vobject', 'werkzeug',
'wsgiref', 'xlsxwriter', 'xlwt', 'yaml',
]
DFTL_JSLINTRC = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'examples', '.jslintrc'
)
DFLT_DEPRECATED_TREE_ATTRS = ['colors', 'fonts', 'string']
DFTL_MANIFEST_DATA_KEYS = ['data', 'demo', 'demo_xml', 'init_xml', 'test',
'update_xml']
class ModuleChecker(misc.WrapperModuleChecker):
name = settings.CFG_SECTION
msgs = ODOO_MSGS
options = (
('readme_template_url', {
'type': 'string',
'metavar': '<string>',
'default': DFTL_README_TMPL_URL,
'help': 'URL of README.rst template file',
}),
('extfiles_to_lint', {
'type': 'csv',
'metavar': '<comma separated values>',
'default': DFLT_EXTFILES_TO_LINT,
'help': 'List of extension files to check separated by a comma.'
}),
('min-priority', {
'type': 'int',
'metavar': '<int>',
'default': DFTL_MIN_PRIORITY,
'help': 'Minimum priority number of a view with replace of fields.'
}),
('extfiles_convert', {
'type': 'csv',
'metavar': '<comma separated values>',
'default': DFLT_EXTFILES_CONVERT,
'help': 'List of extension files supported to convert '
'from manifest separated by a comma.'
}),
('import_name_whitelist', {
'type': 'csv',
'metavar': '<comma separated values>',
'default': DFLT_IMPORT_NAME_WHITELIST,
'help': 'List of known import dependencies of odoo,'
' separated by a comma.'
}),
('jslintrc', {
'type': 'string',
'metavar': '<path to file>',
'default': os.environ.get('PYLINT_ODOO_JSLINTRC') or DFTL_JSLINTRC,
'help': ('A path to a file that contains a configuration file of '
'javascript lint. You can use the environment variable '
'"PYLINT_ODOO_JSLINTRC" too. Default: %s' % DFTL_JSLINTRC)
}),
('deprecated_tree_attributes', {
'type': 'multiple_choice',
'metavar': '<attributes>',
'default': DFLT_DEPRECATED_TREE_ATTRS,
'choices': DFLT_DEPRECATED_TREE_ATTRS,
'help': 'List of deprecated list view attributes,'
' separated by a comma. Valid values: %s' % ', '.join(
DFLT_DEPRECATED_TREE_ATTRS)
}),
)
odoo_check_versions = {
'missing-import-error': {
'max_odoo_version': '11.0',
},
}
class_inherit_names = []
@utils.check_messages('consider-merging-classes-inherited')
def visit_assign(self, node):
if not self.odoo_node:
return
if not self.linter.is_message_enabled(
'consider-merging-classes-inherited', node.lineno):
return
node_left = node.targets[0]
if not isinstance(node_left, astroid.node_classes.AssignName) or \
node_left.name not in ('_inherit', '_name') or \
not isinstance(node.value, astroid.node_classes.Const) or \
not isinstance(node.parent, astroid.ClassDef):
return
if node_left.name == '_name':
node.parent.odoo_attribute_name = node.value.value
return
_name = getattr(node.parent, 'odoo_attribute_name', None)
_inherit = node.value.value
if _name and _name != _inherit:
            # Skip _name='model.name' _inherit='other.model' because it is valid
return
key = (self.odoo_node, _inherit)
node.file = self.linter.current_file
self.inh_dup.setdefault(key, []).append(node)
def _build_whitelist_module_patterns(self):
known_patterns = []
for known_pattern in self.config.import_name_whitelist:
pattern = known_pattern.replace('*', '.*').replace('?', '.?')
known_patterns.append(re.compile('^' + pattern + '$'))
return known_patterns
def open(self):
"""Define variables to use cache"""
self.inh_dup = {}
patterns = self._build_whitelist_module_patterns()
self._whitelist_module_patterns = patterns
super(ModuleChecker, self).open()
def close(self):
"""Final process get all cached values and add messages"""
for (odoo_node, class_dup_name), nodes in self.inh_dup.items():
if len(nodes) == 1:
continue
path_nodes = []
for node in nodes[1:]:
relpath = os.path.relpath(node.file,
os.path.dirname(odoo_node.file))
path_nodes.append("%s:%d" % (relpath, node.lineno))
self.add_message('consider-merging-classes-inherited',
node=nodes[0],
args=(class_dup_name, ', '.join(path_nodes)))
def _get_odoo_module_imported(self, node):
odoo_module = []
if isinstance(node, astroid.ImportFrom) and \
('openerp.addons' in node.modname or
'odoo.addons' in node.modname):
packages = node.modname.split('.')
if len(packages) >= 3:
# from openerp.addons.odoo_module import models
odoo_module.append(packages[2])
else:
# from openerp.addons import odoo_module
odoo_module.append(node.names[0][0])
elif isinstance(node, astroid.Import):
for name, _ in node.names:
if 'openerp.addons' not in name and 'odoo.addons' not in name:
continue
packages = name.split('.')
if len(packages) >= 3:
# import openerp.addons.odoo_module
odoo_module.append(packages[2])
return odoo_module
def check_odoo_relative_import(self, node):
if self.odoo_module_name in self._get_odoo_module_imported(node):
self.add_message('odoo-addons-relative-import', node=node,
args=(self.odoo_module_name))
@staticmethod
def _is_absolute_import(node, name):
modnode = node.root()
importedmodnode = ModuleChecker._get_imported_module(node, name)
if importedmodnode and importedmodnode.file and \
modnode is not importedmodnode and \
importedmodnode.name != name:
return True
return False
@staticmethod
def _get_imported_module(importnode, modname):
try:
return importnode.do_import_module(modname)
except:
pass
def _is_module_name_in_whitelist(self, module_name):
# Try to find most specific placement instruction match (if any)
# (from isort place_module() method)
parts = module_name.split('.')
module_names_to_check = [
'.'.join(parts[:first_k])
for first_k in range(len(parts), 0, -1)
]
# Check if one of the module name is part of the whitelist.
# For an module name such as 'anybox.testing.openerp', the
# modules names to check will be:
# ['anybox.testing.openerp', 'anybox.testing', 'anybox']
# Only one of them has to be in the whitelist to be accepted.
for module_name_to_check in module_names_to_check:
for pattern in self._whitelist_module_patterns:
if pattern.match(module_name_to_check):
return True
return False
def _check_imported_packages(self, node, module_name):
"""Check if the import node is a external dependency to validate it"""
if not module_name:
            # skip local packages because they are not external dependencies.
return
if not self.manifest_dict:
            # skip if it is not an odoo module
return
if not isinstance(node.parent, astroid.Module):
# skip nested import sentences
return
if self._is_absolute_import(node, module_name):
# skip absolute imports
return
if self._is_module_name_in_whitelist(module_name):
# ignore whitelisted modules
return
isort_obj = isort.SortImports(file_contents='')
import_category = isort_obj.place_module(module_name)
if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):
            # skip if it is not an external library or it is a whitelisted library
return
relpath = os.path.relpath(
node.parent.file, os.path.dirname(self.manifest_file))
if os.path.dirname(relpath) == 'tests':
            # import error rules don't apply to the test files
            # since these files are loaded only when running tests
            # and in such a case the
            # module and its external dependencies are installed.
return
self.add_message('missing-import-error', node=node,
args=(module_name,))
ext_deps = self.manifest_dict.get('external_dependencies') or {}
py_ext_deps = ext_deps.get('python') or []
if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:
return
if module_name not in py_ext_deps and \
module_name.split('.')[0] not in py_ext_deps:
self.add_message('missing-manifest-dependency', node=node,
args=(module_name,))
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error',
'missing-manifest-dependency')
def visit_importfrom(self, node):
self.check_odoo_relative_import(node)
if isinstance(node.scope(), astroid.Module):
package = node.modname
self._check_imported_packages(node, package)
@utils.check_messages('odoo-addons-relative-import',
'missing-import-error',
'missing-manifest-dependency')
def visit_import(self, node):
self.check_odoo_relative_import(node)
for name, _ in node.names:
if isinstance(node.scope(), astroid.Module):
self._check_imported_packages(node, name)
@utils.check_messages('except-pass')
def visit_tryexcept(self, node):
"""Visit block try except"""
for handler in node.handlers:
if (not handler.name and
len(handler.body) == 1 and
isinstance(handler.body[0], astroid.node_classes.Pass)):
self.add_message('except-pass', node=handler)
def _check_rst_syntax_error(self):
"""Check if rst file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
rst_files = self.filter_files_ext('rst')
self.msg_args = []
for rst_file in rst_files:
errors = self.check_rst_syntax(
os.path.join(self.module_path, rst_file))
for error in errors:
msg = error.full_message
res = re.search(
r'No directive entry for "([\w|\-]+)"|'
r'Unknown directive type "([\w|\-]+)"|'
r'No role entry for "([\w|\-]+)"|'
r'Unknown interpreted text role "([\w|\-]+)"', msg)
# TODO: Add support for sphinx directives after fix
# https://github.com/twolfson/restructuredtext-lint/issues/29
if res:
# Skip directive errors
continue
self.msg_args.append((
"%s:%d" % (rst_file, error.line or 0),
msg.strip('\n').replace('\n', '|')))
if self.msg_args:
return False
return True
def _check_missing_readme(self):
"""Check if exists ./README.{rst,md,txt} file
:return: If exists return True else False
"""
self.msg_args = (self.config.readme_template_url,)
for readme in DFTL_README_FILES:
if os.path.isfile(os.path.join(self.module_path, readme)):
return True
return False
def _check_xml_syntax_error(self):
"""Check if xml file there is syntax error
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
result = self.parse_xml(os.path.join(self.module_path, xml_file))
if isinstance(result, string_types):
self.msg_args.append((
xml_file, result.strip('\n').replace('\n', '|')))
if self.msg_args:
return False
return True
def _get_duplicate_xml_record_id(self, records):
"""Get duplicated records based on attribute id
:param records list: List of lxml.etree.Element "<record"
:return: Duplicated items.
e.g. {record.id: [record_node1, record_node2]}
:rtype: dict
"""
all_records = {}
for record in records:
record_id = "%s/%s_noupdate_%s" % (
record.attrib.get('section', ''),
record.attrib.get('id', ''),
record.getparent().attrib.get('noupdate', '0'),
)
all_records.setdefault(record_id, []).append(record)
        # Remove all keys which are not duplicated
records = {}
for key, items in all_records.items():
if not len(items) < 2:
records[key] = items
return records
def _check_duplicate_xml_record_id(self):
"""Check duplicated XML-IDs inside of the files of
each manifest-section treated them separately
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_records = []
for fname, section in self._get_manifest_referenced_files().items():
if os.path.splitext(fname)[1].lower() != '.xml':
continue
fname = os.path.join(self.module_path, fname)
for xml_record in self.get_xml_records(fname):
xml_record.attrib['section'] = section
xml_records.append(xml_record)
for name, fobjs in \
self._get_duplicate_xml_record_id(xml_records).items():
self.msg_args.append((
"%s:%d" % (os.path.relpath(fobjs[0].base, self.module_path),
fobjs[0].sourceline),
name,
', '.join([os.path.relpath(fobj.base, self.module_path) +
':' + str(fobj.sourceline)
for fobj in fobjs[1:]]),
))
if self.msg_args:
return False
return True
def _check_duplicate_id_csv(self):
"""Check duplicate xml id in ir.model.access.csv files of a odoo module.
:return: False if exists errors and
add list of errors in self.msg_args
"""
all_csv_ids = []
self.msg_args = []
for csv_file_rel in self.filter_files_ext('csv', relpath=True):
csv_file = os.path.join(self.module_path, csv_file_rel)
if os.path.basename(csv_file) == 'ir.model.access.csv':
all_csv_ids.extend(self.get_field_csv(csv_file))
duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)
for duplicated_id_csv in duplicated_ids_csv:
self.msg_args.append((csv_file_rel, duplicated_id_csv))
if duplicated_ids_csv:
return False
return True
def _check_redundant_modulename_xml(self):
"""Check redundant module name in xml file.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file_rel in self.filter_files_ext('xml', relpath=True):
xml_file = os.path.join(self.module_path, xml_file_rel)
for xml_id, lineno in self.get_xml_redundant_module_name(
xml_file, self.module):
self.msg_args.append(
("%s:%d" % (xml_file_rel, lineno), xml_id))
if self.msg_args:
return False
return True
def _check_character_not_valid_in_resource_link(self):
"""The resource in in src/href contains a not valid chararter"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml'):
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
for name, attr in (('link', 'href'), ('script', 'src')):
nodes = (doc.xpath('.//%s[@%s]' % (name, attr))
if not isinstance(doc, string_types) else [])
for node in nodes:
resource = node.get(attr, '')
ext = os.path.splitext(os.path.basename(resource))[1]
if (resource.startswith('/') and not
re.search('^[.][a-zA-Z]+$', ext)):
self.msg_args.append(("%s:%s" % (xml_file,
node.sourceline)))
if self.msg_args:
return False
return True
def _get_duplicate_xml_fields(self, fields):
"""Get duplicated xml fields based on attribute name
:param fields list: List of lxml.etree.Element "<field"
:return: Duplicated items.
e.g. {field.name: [field_node1, field_node2]}
:rtype: dict
"""
all_fields = {}
for field in fields:
field_xml = field.attrib.get('name')
if not field_xml:
continue
all_fields.setdefault(
(field_xml, field.attrib.get('context'),
field.attrib.get('filter_domain'),
field.getparent()), []).append(field)
        # Remove all keys which are not duplicated by excluding them from the returned dict
return dict(((name, context, filter_domain, parent_node), nodes) for
(name, context, filter_domain, parent_node), nodes in
all_fields.items() if len(nodes) >= 2)
def _check_duplicate_xml_fields(self):
"""Check duplicate field in all record of xml files of a odoo module.
Important note: this check does not work with inherited views.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(
os.path.join(self.module_path, xml_file)):
if record.xpath('field[@name="inherit_id"]'):
continue
for xpath in ['field', 'field/*/field',
'field/*/field/tree/field',
'field/*/field/form/field']:
for name, fobjs in self._get_duplicate_xml_fields(
record.xpath(xpath)).items():
self.msg_args.append((
"%s:%d" % (xml_file, fobjs[0].sourceline), name[0],
', '.join([str(fobj.sourceline)
for fobj in fobjs[1:]]),
))
if self.msg_args:
return False
return True
def _check_dangerous_filter_wo_user(self):
"""Check dangerous filter without a user assigned.
:return: False if exists errors and
add list of errors in self.msg_args
"""
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
ir_filter_records = self.get_xml_records(
os.path.join(self.module_path, xml_file), model='ir.filters')
for ir_filter_record in ir_filter_records:
ir_filter_fields = ir_filter_record.xpath(
"field[@name='name' or @name='user_id']")
# if exists field="name" then is a new record
# then should be field="user_id" too
if ir_filter_fields and len(ir_filter_fields) == 1:
# TODO: Add a list of msg_args before of return
# TODO: Add source lineno in all xml checks
self.msg_args = (
"%s:%d" % (xml_file, ir_filter_record.sourceline),
ir_filter_record.get('id'),)
return False
return True
@staticmethod
def _get_priority(view):
try:
priority_node = view.xpath("field[@name='priority'][1]")[0]
return int(priority_node.get('eval', priority_node.text) or 0)
except (IndexError, ValueError):
# IndexError: If the field is not found
# ValueError: If the value found is not valid integer
pass
return 0
@staticmethod
def _is_replaced_field(view):
try:
arch = view.xpath("field[@name='arch' and @type='xml'][1]")[0]
except IndexError:
return None
replaces = \
arch.xpath(".//field[@name='name' and @position='replace'][1]") + \
arch.xpath(".//xpath[@position='replace'][1]")
return bool(replaces)
def _check_dangerous_view_replace_wo_priority(self):
"""Check dangerous view defined with low priority
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
views = self.get_xml_records(
os.path.join(self.module_path, xml_file), model='ir.ui.view')
for view in views:
priority = self._get_priority(view)
is_replaced_field = self._is_replaced_field(view)
if is_replaced_field and priority < self.config.min_priority:
self.msg_args.append((
"%s:%s" % (xml_file, view.sourceline), priority,
self.config.min_priority))
if self.msg_args:
return False
return True
def _check_create_user_wo_reset_password(self):
"""Check xml records of user without the context
'context="{'no_reset_password': True}"'
        This context avoids sending an email and a mail log warning
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
xml_files = self.filter_files_ext('xml')
for xml_file in xml_files:
user_records = self.get_xml_records(
os.path.join(self.module_path, xml_file), model='res.users')
# if exists field="name" then is a new record
# then should be context
self.msg_args.extend([
("%s:%s" % (xml_file, user_record.sourceline))
for user_record in user_records
if user_record.xpath("field[@name='name']") and
'no_reset_password' not in (user_record.get('context') or '')])
if self.msg_args:
return False
return True
def _check_javascript_lint(self):
"""Check javascript lint
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for js_file_rel in self.filter_files_ext('js', relpath=True):
js_file = os.path.join(self.module_path, js_file_rel)
errors = self.check_js_lint(js_file, self.config.jslintrc)
for error in errors:
self.msg_args.append((js_file_rel + error,))
if self.msg_args:
return False
return True
def _check_deprecated_data_xml_node(self):
"""Check deprecated <data> xml node inside <odoo> xml node
:return: False if found <data> xml node inside <odoo> xml node"""
xml_files = self.filter_files_ext('xml')
self.msg_args = []
for xml_file in xml_files:
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
odoo_nodes = doc.xpath("/odoo") \
if not isinstance(doc, string_types) else []
children, data_node = ((odoo_nodes[0].getchildren(),
odoo_nodes[0].findall('data'))
if odoo_nodes else ([], []))
if len(children) == 1 and len(data_node) == 1:
lineno = odoo_nodes[0].sourceline
self.msg_args.append(("%s:%s" % (xml_file, lineno)))
if self.msg_args:
return False
return True
def _check_deprecated_openerp_xml_node(self):
"""Check deprecated <openerp> xml node
:return: False if exists <openerp> node and
add list of xml files in self.msg_args
"""
xml_files = self.filter_files_ext('xml')
self.msg_args = []
for xml_file in xml_files:
doc = self.parse_xml(os.path.join(self.module_path, xml_file))
openerp_nodes = doc.xpath("/openerp") \
if not isinstance(doc, string_types) else []
if openerp_nodes:
lineno = openerp_nodes[0].sourceline
self.msg_args.append(("%s:%s" % (xml_file, lineno)))
if self.msg_args:
return False
return True
def _check_wrong_tabs_instead_of_spaces(self):
"""Check wrong tabs character instead of four spaces.
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for type_file in self.config.extfiles_to_lint:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
ext_file = os.path.join(self.module_path, ext_file_rel)
countline = 0
with open(ext_file, 'rb') as fp:
for line in fp:
countline += 1
line_space_trip = line.lstrip(b' ')
if line_space_trip != line_space_trip.lstrip(b'\t'):
self.msg_args.append(
("%s:%d" % (ext_file_rel, countline)))
if self.msg_args:
return False
return True
def _check_missing_newline_extrafiles(self):
"""Check missing newline in other ext files (.xml, .csv, .po)
:return: False if exists errors and
add list of errors in self.msg_args
"""
self.msg_args = []
for type_file in self.config.extfiles_to_lint:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
ext_file = os.path.join(self.module_path, ext_file_rel)
last_line = ''
                # NOTE: SEEK_END is only supported with 'rb' mode in py3
with open(ext_file, 'rb') as fp:
if os.stat(ext_file).st_size > 1:
fp.seek(-2, os.SEEK_END)
last_line = fp.readline()
if not (last_line.endswith(b'\n') or
last_line.endswith(b'\r')):
self.msg_args.append((ext_file_rel,))
if self.msg_args:
return False
return True
def _get_manifest_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in self.manifest_dict.get(data_type) or []:
referenced_files[fname] = data_type
return referenced_files
def _get_xml_referenced_files(self):
referenced_files = {}
for data_type in DFTL_MANIFEST_DATA_KEYS:
for fname in self.manifest_dict.get(data_type) or []:
if not fname.endswith('.xml'):
continue
referenced_files.update(
self._get_xml_referenced_files_report(fname, data_type)
)
return referenced_files
def _get_xml_referenced_files_report(self, fname, data_type):
return {
# those files are relative to the addon path
os.path.join(
*record.attrib[attribute].split(os.sep)[1:]
): data_type
for attribute in ['xml', 'xsl']
for record in self.parse_xml(
os.path.join(self.module_path, fname)
)
.xpath('//report[@%s]' % attribute)
}
def _get_module_files(self):
module_files = []
for type_file in self.config.extfiles_convert:
for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
module_files.append(ext_file_rel)
return module_files
def _check_file_not_used(self):
"""Check if a file is not used from manifest"""
module_files = set(self._get_module_files())
referenced_files = set(self._get_manifest_referenced_files()).union(
set(self._get_xml_referenced_files())
)
excluded_dirs = ['static', 'test', 'tests', 'migrations']
no_referenced_files = [
f for f in (module_files - referenced_files)
if f.split(os.path.sep)[0] not in excluded_dirs
]
self.msg_args = no_referenced_files
return not no_referenced_files
def _check_xml_attribute_translatable(self):
"""The xml attribute is missing the translation="off" tag
Example <attribute name="groups">sale.group</attribute>
"""
if (self.linter._all_options['valid_odoo_versions'].config
.valid_odoo_versions != ['8.0']):
return True
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(
os.path.join(self.module_path, xml_file), None,
'//attribute[not(@name="string") and not(@translation)]'):
self.msg_args.append(
("%s:%d" % (xml_file, record.sourceline), 'xml_id'))
if self.msg_args:
return False
return True
def _check_xml_deprecated_tree_attribute(self):
"""The tree-view declaration is using a deprecated attribute.
Example <tree string="Partners"></tree>
"""
checks = [
{
'attr': 'colors',
'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},
'xpath': './/tree[@colors]',
},
{
'attr': 'fonts',
'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},
'xpath': './/tree[@fonts]',
},
{
'attr': 'string',
'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'},
'xpath': './/tree[@string]',
},
]
valid_versions = set(
self.linter._all_options['valid_odoo_versions'].config
.valid_odoo_versions)
applicable_checks = [check for check in checks if (
check['attr'] in self.config.deprecated_tree_attributes and
bool(valid_versions - check['skip_versions']))]
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=True):
for record in self.get_xml_records(
os.path.join(self.module_path, xml_file),
model='ir.ui.view'):
for check in applicable_checks:
if record.xpath(check['xpath']):
self.msg_args.append((
'%s:%d' % (xml_file, record.sourceline),
check['attr']))
if self.msg_args:
return False
return True
def _check_xml_deprecated_qweb_directive(self):
"""Check for use of deprecated QWeb directives t-*-options.
:return: False if deprecated directives are found, in which case
self.msg_args will contain the error messages.
"""
valid_versions = set(self.linter._all_options[
'valid_odoo_versions'].config.valid_odoo_versions)
if not valid_versions & {'10.0', '11.0'}:
return True
deprecated_directives = {
't-esc-options',
't-field-options',
't-raw-options',
}
directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)
xpath = '|'.join(
'/%s//template//*[%s]' % (tag, directive_attrs)
for tag in ('odoo', 'openerp')
)
self.msg_args = []
for xml_file in self.filter_files_ext('xml', relpath=False):
doc = self.parse_xml(xml_file)
if isinstance(doc, string_types):
continue
for node in doc.xpath(xpath):
# Find which directive was used exactly.
directive = next(
iter(set(node.attrib) & deprecated_directives))
self.msg_args.append((
'%s:%d' % (xml_file, node.sourceline), directive))
return not bool(self.msg_args)
|
normal
|
{
"blob_id": "9f34f94422f4847859e9111f34ade2e1274cb543",
"index": 8775,
"step-1": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n <mask token>\n\n def check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n <mask token>\n <mask token>\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n <mask token>\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def 
_check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n <mask token>\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n <mask token>\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, 
self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n <mask token>\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n <mask token>\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if 
self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n <mask token>\n\n def check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n <mask token>\n <mask token>\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n <mask token>\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def 
_check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. {record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',\n ''), record.attrib.get('id', ''), record.getparent().attrib\n .get('noupdate', '0'))\n all_records.setdefault(record_id, []).append(record)\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n <mask token>\n <mask token>\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n <mask token>\n <mask token>\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field', 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(record\n .xpath(xpath)).items():\n self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]\n .sourceline), name[0], ', '.join([str(fobj.\n sourceline) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in 
self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n <mask token>\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def 
_check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):\n data_type for attribute in ['xml', 'xsl'] for record in self.\n parse_xml(os.path.join(self.module_path, fname)).xpath(\n '//report[@%s]' % attribute)}\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n <mask token>\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 
check['attr']))\n if self.msg_args:\n return False\n return True\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n <mask token>\n\n def check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n\n @staticmethod\n def _is_absolute_import(node, name):\n modnode = node.root()\n importedmodnode = ModuleChecker._get_imported_module(node, name)\n if (importedmodnode and importedmodnode.file and modnode is not\n importedmodnode and importedmodnode.name != name):\n return True\n return False\n <mask token>\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n\n def _check_imported_packages(self, node, module_name):\n \"\"\"Check if the import node is a external dependency to validate it\"\"\"\n if not module_name:\n return\n if not self.manifest_dict:\n return\n if not isinstance(node.parent, astroid.Module):\n return\n if self._is_absolute_import(node, module_name):\n return\n if self._is_module_name_in_whitelist(module_name):\n return\n isort_obj = isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n return\n relpath = os.path.relpath(node.parent.file, os.path.dirname(self.\n manifest_file))\n if os.path.dirname(relpath) == 'tests':\n 
return\n self.add_message('missing-import-error', node=node, args=(module_name,)\n )\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and module_name.split('.')[0\n ] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node, args\n =(module_name,))\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. 
{record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',\n ''), record.attrib.get('id', ''), record.getparent().attrib\n .get('noupdate', '0'))\n all_records.setdefault(record_id, []).append(record)\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n\n def _check_duplicate_xml_record_id(self):\n \"\"\"Check duplicated XML-IDs inside of the files of\n each manifest-section treated them separately\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in self._get_duplicate_xml_record_id(xml_records\n ).items():\n self.msg_args.append(('%s:%d' % (os.path.relpath(fobjs[0].base,\n self.module_path), fobjs[0].sourceline), name, ', '.join([(\n os.path.relpath(fobj.base, self.module_path) + ':' + str(\n fobj.sourceline)) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _get_duplicate_xml_fields(self, fields):\n \"\"\"Get duplicated xml fields based on attribute name\n :param fields list: List of lxml.etree.Element \"<field\"\n :return: Duplicated items.\n e.g. 
{field.name: [field_node1, field_node2]}\n :rtype: dict\n \"\"\"\n all_fields = {}\n for field in fields:\n field_xml = field.attrib.get('name')\n if not field_xml:\n continue\n all_fields.setdefault((field_xml, field.attrib.get('context'),\n field.attrib.get('filter_domain'), field.getparent()), []\n ).append(field)\n return dict(((name, context, filter_domain, parent_node), nodes) for\n (name, context, filter_domain, parent_node), nodes in\n all_fields.items() if len(nodes) >= 2)\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field', 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(record\n .xpath(xpath)).items():\n self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]\n .sourceline), name[0], ', '.join([str(fobj.\n sourceline) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n <mask token>\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), 
model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n <mask token>\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):\n data_type for attribute in ['xml', 'xsl'] for record in self.\n parse_xml(os.path.join(self.module_path, fname)).xpath(\n '//report[@%s]' % attribute)}\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n\n def _check_file_not_used(self):\n \"\"\"Check if a file is not used from manifest\"\"\"\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(set\n (self._get_xml_referenced_files()))\n excluded_dirs = 
['static', 'test', 'tests', 'migrations']\n no_referenced_files = [f for f in module_files - referenced_files if\n f.split(os.path.sep)[0] not in excluded_dirs]\n self.msg_args = no_referenced_files\n return not no_referenced_files\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName\n ) or node_left.name not in ('_inherit', '_name') or not isinstance(\n node.value, astroid.node_classes.Const) or not isinstance(node.\n parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n return\n key = self.odoo_node, _inherit\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file, os.path.dirname(\n odoo_node.file))\n path_nodes.append('%s:%d' % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited', node=\n nodes[0], args=(class_dup_name, ', '.join(path_nodes)))\n\n def _get_odoo_module_imported(self, node):\n odoo_module = []\n if isinstance(node, astroid.ImportFrom) and ('openerp.addons' in\n node.modname or 'odoo.addons' in node.modname):\n packages = node.modname.split('.')\n if len(packages) >= 3:\n odoo_module.append(packages[2])\n else:\n odoo_module.append(node.names[0][0])\n elif isinstance(node, astroid.Import):\n for name, _ in node.names:\n if 'openerp.addons' not in name and 'odoo.addons' not in name:\n continue\n packages = name.split('.')\n if len(packages) >= 3:\n odoo_module.append(packages[2])\n return odoo_module\n\n def check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node, args\n =self.odoo_module_name)\n\n @staticmethod\n def _is_absolute_import(node, name):\n modnode = node.root()\n importedmodnode = ModuleChecker._get_imported_module(node, name)\n if (importedmodnode and importedmodnode.file and modnode is not\n importedmodnode and importedmodnode.name != name):\n return True\n return False\n\n @staticmethod\n def _get_imported_module(importnode, modname):\n try:\n return importnode.do_import_module(modname)\n except:\n pass\n\n def _is_module_name_in_whitelist(self, module_name):\n parts = module_name.split('.')\n module_names_to_check = ['.'.join(parts[:first_k]) for first_k in\n range(len(parts), 0, -1)]\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return 
False\n\n def _check_imported_packages(self, node, module_name):\n \"\"\"Check if the import node is a external dependency to validate it\"\"\"\n if not module_name:\n return\n if not self.manifest_dict:\n return\n if not isinstance(node.parent, astroid.Module):\n return\n if self._is_absolute_import(node, module_name):\n return\n if self._is_module_name_in_whitelist(module_name):\n return\n isort_obj = isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n return\n relpath = os.path.relpath(node.parent.file, os.path.dirname(self.\n manifest_file))\n if os.path.dirname(relpath) == 'tests':\n return\n self.add_message('missing-import-error', node=node, args=(module_name,)\n )\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and module_name.split('.')[0\n ] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node, args\n =(module_name,))\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error', 'missing-manifest-dependency')\n def visit_import(self, node):\n self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if not handler.name and len(handler.body) == 1 and isinstance(\n handler.body[0], astroid.node_classes.Pass):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(os.path.join(self.module_path,\n rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n 'No directive entry for \"([\\\\w|\\\\-]+)\"|Unknown directive type \"([\\\\w|\\\\-]+)\"|No role entry for \"([\\\\w|\\\\-]+)\"|Unknown interpreted text role \"([\\\\w|\\\\-]+)\"'\n , msg)\n if res:\n continue\n self.msg_args.append(('%s:%d' % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _check_missing_readme(self):\n \"\"\"Check if exists ./README.{rst,md,txt} file\n :return: If exists return True else False\n \"\"\"\n self.msg_args = self.config.readme_template_url,\n for readme in DFTL_README_FILES:\n if os.path.isfile(os.path.join(self.module_path, readme)):\n return True\n return False\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((xml_file, 
result.strip('\\n').replace(\n '\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. {record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = '%s/%s_noupdate_%s' % (record.attrib.get('section',\n ''), record.attrib.get('id', ''), record.getparent().attrib\n .get('noupdate', '0'))\n all_records.setdefault(record_id, []).append(record)\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n\n def _check_duplicate_xml_record_id(self):\n \"\"\"Check duplicated XML-IDs inside of the files of\n each manifest-section treated them separately\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in self._get_duplicate_xml_record_id(xml_records\n ).items():\n self.msg_args.append(('%s:%d' % (os.path.relpath(fobjs[0].base,\n self.module_path), fobjs[0].sourceline), name, ', '.join([(\n os.path.relpath(fobj.base, self.module_path) + ':' + str(\n fobj.sourceline)) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_duplicate_id_csv(self):\n \"\"\"Check duplicate xml id in ir.model.access.csv files of a odoo module.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(xml_file,\n self.module):\n self.msg_args.append(('%s:%d' % (xml_file_rel, lineno), xml_id)\n )\n if self.msg_args:\n return False\n return True\n\n def _check_character_not_valid_in_resource_link(self):\n \"\"\"The resource in in src/href contains a not valid chararter\"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml'):\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n for name, attr in (('link', 'href'), ('script', 'src')):\n nodes = doc.xpath('.//%s[@%s]' % (name, attr)\n ) if not isinstance(doc, string_types) else []\n for node in nodes:\n resource = node.get(attr, '')\n ext = os.path.splitext(os.path.basename(resource))[1]\n if resource.startswith('/') and not re.search(\n '^[.][a-zA-Z]+$', ext):\n self.msg_args.append('%s:%s' % (xml_file, 
node.\n sourceline))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_fields(self, fields):\n \"\"\"Get duplicated xml fields based on attribute name\n :param fields list: List of lxml.etree.Element \"<field\"\n :return: Duplicated items.\n e.g. {field.name: [field_node1, field_node2]}\n :rtype: dict\n \"\"\"\n all_fields = {}\n for field in fields:\n field_xml = field.attrib.get('name')\n if not field_xml:\n continue\n all_fields.setdefault((field_xml, field.attrib.get('context'),\n field.attrib.get('filter_domain'), field.getparent()), []\n ).append(field)\n return dict(((name, context, filter_domain, parent_node), nodes) for\n (name, context, filter_domain, parent_node), nodes in\n all_fields.items() if len(nodes) >= 2)\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field', 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(record\n .xpath(xpath)).items():\n self.msg_args.append(('%s:%d' % (xml_file, fobjs[0]\n .sourceline), name[0], ', '.join([str(fobj.\n sourceline) for fobj in fobjs[1:]])))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n if ir_filter_fields and len(ir_filter_fields) == 1:\n self.msg_args = '%s:%d' % (xml_file, ir_filter_record.\n sourceline), ir_filter_record.get('id')\n return False\n return True\n\n @staticmethod\n def _get_priority(view):\n try:\n priority_node = view.xpath(\"field[@name='priority'][1]\")[0]\n return int(priority_node.get('eval', priority_node.text) or 0)\n except (IndexError, ValueError):\n pass\n return 0\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = arch.xpath(\n \".//field[@name='name' and @position='replace'][1]\") + arch.xpath(\n \".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(os.path.join(self.module_path,\n xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < self.config.min_priority:\n self.msg_args.append(('%s:%s' % (xml_file, view.\n sourceline), priority, self.config.min_priority))\n if self.msg_args:\n 
return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='res.users')\n self.msg_args.extend([('%s:%s' % (xml_file, user_record.\n sourceline)) for user_record in user_records if user_record\n .xpath(\"field[@name='name']\") and 'no_reset_password' not in\n (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath('/odoo') if not isinstance(doc, string_types\n ) else []\n children, data_node = (odoo_nodes[0].getchildren(), odoo_nodes[\n 0].findall('data')) if odoo_nodes else ([], [])\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_openerp_xml_node(self):\n \"\"\"Check deprecated <openerp> xml node\n :return: False if exists <openerp> node and\n add list of xml files in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n openerp_nodes = doc.xpath('/openerp') if not isinstance(doc,\n string_types) else []\n if openerp_nodes:\n lineno = openerp_nodes[0].sourceline\n self.msg_args.append('%s:%s' % (xml_file, lineno))\n if self.msg_args:\n return False\n return True\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append('%s:%d' % (ext_file_rel,\n countline))\n if self.msg_args:\n return False\n return True\n\n def _check_missing_newline_extrafiles(self):\n \"\"\"Check missing newline in other ext files (.xml, .csv, .po)\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel 
in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n last_line = ''\n with open(ext_file, 'rb') as fp:\n if os.stat(ext_file).st_size > 1:\n fp.seek(-2, os.SEEK_END)\n last_line = fp.readline()\n if not (last_line.endswith(b'\\n') or last_line.\n endswith(b'\\r')):\n self.msg_args.append((ext_file_rel,))\n if self.msg_args:\n return False\n return True\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in (self.manifest_dict.get(data_type) or []):\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(self.\n _get_xml_referenced_files_report(fname, data_type))\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {os.path.join(*record.attrib[attribute].split(os.sep)[1:]):\n data_type for attribute in ['xml', 'xsl'] for record in self.\n parse_xml(os.path.join(self.module_path, fname)).xpath(\n '//report[@%s]' % attribute)}\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n\n def _check_file_not_used(self):\n \"\"\"Check if a file is not used from manifest\"\"\"\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(set\n (self._get_xml_referenced_files()))\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [f for f in module_files - referenced_files if\n f.split(os.path.sep)[0] not in excluded_dirs]\n self.msg_args = no_referenced_files\n return not no_referenced_files\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions != ['8.0']:\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(os.path.join(self.\n module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [{'attr': 'colors', 'skip_versions': {'4.2', '5.0', '6.0',\n '6.1', '7.0', '8.0'}, 'xpath': './/tree[@colors]'}, {'attr':\n 'fonts', 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0',\n '8.0'}, 'xpath': './/tree[@fonts]'}, {'attr': 'string',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'}, 'xpath':\n './/tree[@string]'}]\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n applicable_checks = [check for check in checks if check['attr'] in\n self.config.deprecated_tree_attributes and bool(valid_versions -\n check['skip_versions'])]\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in 
self.get_xml_records(os.path.join(self.\n module_path, xml_file), model='ir.ui.view'):\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append(('%s:%d' % (xml_file, record.\n sourceline), check['attr']))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_qweb_directive(self):\n \"\"\"Check for use of deprecated QWeb directives t-*-options.\n :return: False if deprecated directives are found, in which case\n self.msg_args will contain the error messages.\n \"\"\"\n valid_versions = set(self.linter._all_options['valid_odoo_versions'\n ].config.valid_odoo_versions)\n if not valid_versions & {'10.0', '11.0'}:\n return True\n deprecated_directives = {'t-esc-options', 't-field-options',\n 't-raw-options'}\n directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)\n xpath = '|'.join('/%s//template//*[%s]' % (tag, directive_attrs) for\n tag in ('odoo', 'openerp'))\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=False):\n doc = self.parse_xml(xml_file)\n if isinstance(doc, string_types):\n continue\n for node in doc.xpath(xpath):\n directive = next(iter(set(node.attrib) & deprecated_directives)\n )\n self.msg_args.append(('%s:%d' % (xml_file, node.sourceline),\n directive))\n return not bool(self.msg_args)\n",
"step-5": "\"\"\"Visit module to add odoo checks\n\"\"\"\n\nimport os\nimport re\n\nimport astroid\nimport isort\nfrom pylint.checkers import utils\nfrom six import string_types\n\nfrom .. import misc, settings\n\nODOO_MSGS = {\n # C->convention R->refactor W->warning E->error F->fatal\n\n # Visit odoo module with settings.BASE_OMODULE_ID\n 'C%d02' % settings.BASE_OMODULE_ID: (\n 'Missing ./README.rst file. Template here: %s',\n 'missing-readme',\n settings.DESC_DFLT\n ),\n 'E%d01' % settings.BASE_OMODULE_ID: (\n '%s %s',\n 'rst-syntax-error',\n settings.DESC_DFLT\n ),\n 'E%d02' % settings.BASE_OMODULE_ID: (\n '%s error: %s',\n 'xml-syntax-error',\n settings.DESC_DFLT\n ),\n 'W%d01' % settings.BASE_OMODULE_ID: (\n '%s Dangerous filter without explicit `user_id` in xml_id %s',\n 'dangerous-filter-wo-user',\n settings.DESC_DFLT\n ),\n 'W%d02' % settings.BASE_OMODULE_ID: (\n '%s Duplicate xml record id \"%s\" in %s',\n 'duplicate-xml-record-id',\n settings.DESC_DFLT\n ),\n 'W%d03' % settings.BASE_OMODULE_ID: (\n '%s',\n 'javascript-lint',\n settings.DESC_DFLT\n ),\n 'W%d04' % settings.BASE_OMODULE_ID: (\n '%s Deprecated <openerp> xml node',\n 'deprecated-openerp-xml-node',\n settings.DESC_DFLT\n ),\n 'W%d05' % settings.BASE_OMODULE_ID: (\n '%s record res.users without '\n 'context=\"{\\'no_reset_password\\': True}\"',\n 'create-user-wo-reset-password',\n settings.DESC_DFLT\n ),\n 'W%d06' % settings.BASE_OMODULE_ID: (\n '%s Duplicate id \"%s\"',\n 'duplicate-id-csv',\n settings.DESC_DFLT\n ),\n 'W%d07' % settings.BASE_OMODULE_ID: (\n '%s Duplicate xml field \"%s\" in lines %s',\n 'duplicate-xml-fields',\n settings.DESC_DFLT\n ),\n 'W%d08' % settings.BASE_OMODULE_ID: (\n '%s Missing newline',\n 'missing-newline-extrafiles',\n settings.DESC_DFLT\n ),\n 'W%d09' % settings.BASE_OMODULE_ID: (\n '%s Redundant name module reference in xml_ids \"%s\".',\n 'redundant-modulename-xml',\n settings.DESC_DFLT\n ),\n 'W%d10' % settings.BASE_OMODULE_ID: (\n '%s Use wrong tabs indentation instead of four spaces',\n 'wrong-tabs-instead-of-spaces',\n settings.DESC_DFLT\n ),\n 'R%d80' % settings.BASE_OMODULE_ID: (\n 'Consider merging classes inherited to \"%s\" from %s.',\n 'consider-merging-classes-inherited',\n settings.DESC_DFLT\n ),\n 'W%d50' % settings.BASE_OMODULE_ID: (\n 'Same Odoo module absolute import. You should use '\n 'relative import with \".\" '\n 'instead of \"openerp.addons.%s\"',\n 'odoo-addons-relative-import',\n settings.DESC_DFLT\n ),\n 'W%d40' % settings.BASE_OMODULE_ID: (\n '%s Dangerous use of \"replace\" from view '\n 'with priority %s < %s. '\n 'Increase priority or don\\'t use \"replace\". '\n 'For more information see https://odoo-development.readthedocs.io/en/latest/dev/xml/inherit.html#collisions-and-priority ',\n 'dangerous-view-replace-wo-priority',\n settings.DESC_DFLT\n ),\n 'W%d30' % settings.BASE_OMODULE_ID: (\n '%s not used from manifest',\n 'file-not-used',\n settings.DESC_DFLT\n ),\n 'W%d35' % settings.BASE_OMODULE_ID: (\n 'External dependency \"%s\" without ImportError. More info: '\n 'https://odoo-development.readthedocs.io/en/latest/dev/py/external-imports.html'\n '#external-dependencies',\n 'missing-import-error',\n settings.DESC_DFLT\n ),\n 'W%d36' % settings.BASE_OMODULE_ID: (\n 'Missing external dependency \"%s\" from manifest. 
More info: '\n 'https://github.com/OCA/odoo-community.org/blob/master/website/'\n 'Contribution/CONTRIBUTING.rst'\n '#external-dependencies',\n 'missing-manifest-dependency',\n settings.DESC_DFLT\n ),\n 'W%d38' % settings.BASE_OMODULE_ID: (\n 'pass into block except. '\n 'If you really need to use the pass consider logging that exception',\n 'except-pass',\n settings.DESC_DFLT\n ),\n 'W%d37' % settings.BASE_OMODULE_ID: (\n '%s The xml attribute is missing the translation=\"off\" tag %s',\n 'xml-attribute-translatable',\n settings.DESC_DFLT\n ),\n 'W%d42' % settings.BASE_OMODULE_ID: (\n '%s Deprecated <tree> xml attribute \"%s\"',\n 'xml-deprecated-tree-attribute',\n settings.DESC_DFLT\n ),\n 'W%d43' % settings.BASE_OMODULE_ID: (\n '%s Deprecated QWeb directive \"%s\". Use \"t-options\" instead',\n 'xml-deprecated-qweb-directive',\n settings.DESC_DFLT\n ),\n 'W%d39' % settings.BASE_OMODULE_ID: (\n '%s Use <odoo> instead of <odoo><data> or use <odoo noupdate=\"1\">'\n 'instead of <odoo><data noupdate=\"1\">',\n 'deprecated-data-xml-node',\n settings.DESC_DFLT\n ),\n 'W%d44' % settings.BASE_OMODULE_ID: (\n '%s The resource in in src/href contains a not valid chararter',\n 'character-not-valid-in-resource-link',\n settings.DESC_DFLT\n ),\n}\n\n\nDFTL_README_TMPL_URL = 'https://github.com/OCA/maintainer-tools' + \\\n '/blob/master/template/module/README.rst'\nDFTL_README_FILES = ['README.rst', 'README.md', 'README.txt']\nDFTL_MIN_PRIORITY = 99\n# Files supported from manifest to convert\n# Extracted from openerp/tools/convert.py:def convert_file\nDFLT_EXTFILES_CONVERT = ['csv', 'sql', 'xml', 'yml']\nDFLT_EXTFILES_TO_LINT = DFLT_EXTFILES_CONVERT + [\n 'po', 'js', 'mako', 'rst', 'md', 'markdown']\nDFLT_IMPORT_NAME_WHITELIST = [\n # self-odoo\n 'odoo', 'openerp',\n # packages for unit tests only\n 'requests_mock',\n # Known external packages of odoo\n 'PIL', 'anybox.testing.openerp', 'argparse', 'babel',\n 'dateutil', 'decorator', 'docutils', 'faces', 'feedparser',\n 'gdata', 'gevent', 'greenlet', 'jcconv', 'jinja2',\n 'ldap', 'lxml', 'mako', 'markupsafe', 'mock', 'odf',\n 'ofxparse', 'openid', 'passlib', 'pkg_resources',\n 'psutil', 'psycogreen', 'psycopg2', 'pyPdf', 'pychart',\n 'pydot', 'pyparsing', 'pytz', 'qrcode', 'reportlab',\n 'requests', 'serial', 'simplejson', 'six', 'suds',\n 'unittest2', 'usb', 'vatnumber', 'vobject', 'werkzeug',\n 'wsgiref', 'xlsxwriter', 'xlwt', 'yaml',\n]\nDFTL_JSLINTRC = os.path.join(\n os.path.dirname(os.path.dirname(os.path.realpath(__file__))),\n 'examples', '.jslintrc'\n)\nDFLT_DEPRECATED_TREE_ATTRS = ['colors', 'fonts', 'string']\nDFTL_MANIFEST_DATA_KEYS = ['data', 'demo', 'demo_xml', 'init_xml', 'test',\n 'update_xml']\n\n\nclass ModuleChecker(misc.WrapperModuleChecker):\n name = settings.CFG_SECTION\n msgs = ODOO_MSGS\n options = (\n ('readme_template_url', {\n 'type': 'string',\n 'metavar': '<string>',\n 'default': DFTL_README_TMPL_URL,\n 'help': 'URL of README.rst template file',\n }),\n ('extfiles_to_lint', {\n 'type': 'csv',\n 'metavar': '<comma separated values>',\n 'default': DFLT_EXTFILES_TO_LINT,\n 'help': 'List of extension files to check separated by a comma.'\n }),\n ('min-priority', {\n 'type': 'int',\n 'metavar': '<int>',\n 'default': DFTL_MIN_PRIORITY,\n 'help': 'Minimum priority number of a view with replace of fields.'\n }),\n ('extfiles_convert', {\n 'type': 'csv',\n 'metavar': '<comma separated values>',\n 'default': DFLT_EXTFILES_CONVERT,\n 'help': 'List of extension files supported to convert '\n 'from manifest separated by a comma.'\n 
}),\n ('import_name_whitelist', {\n 'type': 'csv',\n 'metavar': '<comma separated values>',\n 'default': DFLT_IMPORT_NAME_WHITELIST,\n 'help': 'List of known import dependencies of odoo,'\n ' separated by a comma.'\n }),\n ('jslintrc', {\n 'type': 'string',\n 'metavar': '<path to file>',\n 'default': os.environ.get('PYLINT_ODOO_JSLINTRC') or DFTL_JSLINTRC,\n 'help': ('A path to a file that contains a configuration file of '\n 'javascript lint. You can use the environment variable '\n '\"PYLINT_ODOO_JSLINTRC\" too. Default: %s' % DFTL_JSLINTRC)\n }),\n ('deprecated_tree_attributes', {\n 'type': 'multiple_choice',\n 'metavar': '<attributes>',\n 'default': DFLT_DEPRECATED_TREE_ATTRS,\n 'choices': DFLT_DEPRECATED_TREE_ATTRS,\n 'help': 'List of deprecated list view attributes,'\n ' separated by a comma. Valid values: %s' % ', '.join(\n DFLT_DEPRECATED_TREE_ATTRS)\n }),\n )\n\n odoo_check_versions = {\n 'missing-import-error': {\n 'max_odoo_version': '11.0',\n },\n }\n\n class_inherit_names = []\n\n @utils.check_messages('consider-merging-classes-inherited')\n def visit_assign(self, node):\n if not self.odoo_node:\n return\n if not self.linter.is_message_enabled(\n 'consider-merging-classes-inherited', node.lineno):\n return\n node_left = node.targets[0]\n if not isinstance(node_left, astroid.node_classes.AssignName) or \\\n node_left.name not in ('_inherit', '_name') or \\\n not isinstance(node.value, astroid.node_classes.Const) or \\\n not isinstance(node.parent, astroid.ClassDef):\n return\n if node_left.name == '_name':\n node.parent.odoo_attribute_name = node.value.value\n return\n _name = getattr(node.parent, 'odoo_attribute_name', None)\n _inherit = node.value.value\n if _name and _name != _inherit:\n # Skip _name='model.name' _inherit='other.model' because is valid\n return\n key = (self.odoo_node, _inherit)\n node.file = self.linter.current_file\n self.inh_dup.setdefault(key, []).append(node)\n\n def _build_whitelist_module_patterns(self):\n known_patterns = []\n for known_pattern in self.config.import_name_whitelist:\n pattern = known_pattern.replace('*', '.*').replace('?', '.?')\n known_patterns.append(re.compile('^' + pattern + '$'))\n return known_patterns\n\n def open(self):\n \"\"\"Define variables to use cache\"\"\"\n self.inh_dup = {}\n patterns = self._build_whitelist_module_patterns()\n self._whitelist_module_patterns = patterns\n super(ModuleChecker, self).open()\n\n def close(self):\n \"\"\"Final process get all cached values and add messages\"\"\"\n for (odoo_node, class_dup_name), nodes in self.inh_dup.items():\n if len(nodes) == 1:\n continue\n path_nodes = []\n for node in nodes[1:]:\n relpath = os.path.relpath(node.file,\n os.path.dirname(odoo_node.file))\n path_nodes.append(\"%s:%d\" % (relpath, node.lineno))\n self.add_message('consider-merging-classes-inherited',\n node=nodes[0],\n args=(class_dup_name, ', '.join(path_nodes)))\n\n def _get_odoo_module_imported(self, node):\n odoo_module = []\n if isinstance(node, astroid.ImportFrom) and \\\n ('openerp.addons' in node.modname or\n 'odoo.addons' in node.modname):\n packages = node.modname.split('.')\n if len(packages) >= 3:\n # from openerp.addons.odoo_module import models\n odoo_module.append(packages[2])\n else:\n # from openerp.addons import odoo_module\n odoo_module.append(node.names[0][0])\n elif isinstance(node, astroid.Import):\n for name, _ in node.names:\n if 'openerp.addons' not in name and 'odoo.addons' not in name:\n continue\n packages = name.split('.')\n if len(packages) >= 3:\n # import 
openerp.addons.odoo_module\n odoo_module.append(packages[2])\n return odoo_module\n\n def check_odoo_relative_import(self, node):\n if self.odoo_module_name in self._get_odoo_module_imported(node):\n self.add_message('odoo-addons-relative-import', node=node,\n args=(self.odoo_module_name))\n\n @staticmethod\n def _is_absolute_import(node, name):\n modnode = node.root()\n importedmodnode = ModuleChecker._get_imported_module(node, name)\n if importedmodnode and importedmodnode.file and \\\n modnode is not importedmodnode and \\\n importedmodnode.name != name:\n return True\n return False\n\n @staticmethod\n def _get_imported_module(importnode, modname):\n try:\n return importnode.do_import_module(modname)\n except:\n pass\n\n def _is_module_name_in_whitelist(self, module_name):\n # Try to find most specific placement instruction match (if any)\n # (from isort place_module() method)\n parts = module_name.split('.')\n module_names_to_check = [\n '.'.join(parts[:first_k])\n for first_k in range(len(parts), 0, -1)\n ]\n # Check if one of the module name is part of the whitelist.\n # For an module name such as 'anybox.testing.openerp', the\n # modules names to check will be:\n # ['anybox.testing.openerp', 'anybox.testing', 'anybox']\n # Only one of them has to be in the whitelist to be accepted.\n for module_name_to_check in module_names_to_check:\n for pattern in self._whitelist_module_patterns:\n if pattern.match(module_name_to_check):\n return True\n return False\n\n def _check_imported_packages(self, node, module_name):\n \"\"\"Check if the import node is a external dependency to validate it\"\"\"\n if not module_name:\n # skip local packages because is not a external dependency.\n return\n if not self.manifest_dict:\n # skip if is not a module of odoo\n return\n if not isinstance(node.parent, astroid.Module):\n # skip nested import sentences\n return\n if self._is_absolute_import(node, module_name):\n # skip absolute imports\n return\n if self._is_module_name_in_whitelist(module_name):\n # ignore whitelisted modules\n return\n isort_obj = isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n # skip if is not a external library or is a white list library\n return\n relpath = os.path.relpath(\n node.parent.file, os.path.dirname(self.manifest_file))\n if os.path.dirname(relpath) == 'tests':\n # import errors rules don't apply to the test files\n # since these files are loaded only when running tests\n # and in such a case your\n # module and their external dependencies are installed.\n return\n self.add_message('missing-import-error', node=node,\n args=(module_name,))\n\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and \\\n module_name.split('.')[0] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node,\n args=(module_name,))\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error',\n 'missing-manifest-dependency')\n def visit_importfrom(self, node):\n self.check_odoo_relative_import(node)\n if isinstance(node.scope(), astroid.Module):\n package = node.modname\n self._check_imported_packages(node, package)\n\n @utils.check_messages('odoo-addons-relative-import',\n 'missing-import-error',\n 'missing-manifest-dependency')\n def visit_import(self, node):\n 
self.check_odoo_relative_import(node)\n for name, _ in node.names:\n if isinstance(node.scope(), astroid.Module):\n self._check_imported_packages(node, name)\n\n @utils.check_messages('except-pass')\n def visit_tryexcept(self, node):\n \"\"\"Visit block try except\"\"\"\n for handler in node.handlers:\n if (not handler.name and\n len(handler.body) == 1 and\n isinstance(handler.body[0], astroid.node_classes.Pass)):\n self.add_message('except-pass', node=handler)\n\n def _check_rst_syntax_error(self):\n \"\"\"Check if rst file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n rst_files = self.filter_files_ext('rst')\n self.msg_args = []\n for rst_file in rst_files:\n errors = self.check_rst_syntax(\n os.path.join(self.module_path, rst_file))\n for error in errors:\n msg = error.full_message\n res = re.search(\n r'No directive entry for \"([\\w|\\-]+)\"|'\n r'Unknown directive type \"([\\w|\\-]+)\"|'\n r'No role entry for \"([\\w|\\-]+)\"|'\n r'Unknown interpreted text role \"([\\w|\\-]+)\"', msg)\n # TODO: Add support for sphinx directives after fix\n # https://github.com/twolfson/restructuredtext-lint/issues/29\n if res:\n # Skip directive errors\n continue\n self.msg_args.append((\n \"%s:%d\" % (rst_file, error.line or 0),\n msg.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _check_missing_readme(self):\n \"\"\"Check if exists ./README.{rst,md,txt} file\n :return: If exists return True else False\n \"\"\"\n self.msg_args = (self.config.readme_template_url,)\n for readme in DFTL_README_FILES:\n if os.path.isfile(os.path.join(self.module_path, readme)):\n return True\n return False\n\n def _check_xml_syntax_error(self):\n \"\"\"Check if xml file there is syntax error\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n result = self.parse_xml(os.path.join(self.module_path, xml_file))\n if isinstance(result, string_types):\n self.msg_args.append((\n xml_file, result.strip('\\n').replace('\\n', '|')))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_record_id(self, records):\n \"\"\"Get duplicated records based on attribute id\n :param records list: List of lxml.etree.Element \"<record\"\n :return: Duplicated items.\n e.g. 
{record.id: [record_node1, record_node2]}\n :rtype: dict\n \"\"\"\n all_records = {}\n for record in records:\n record_id = \"%s/%s_noupdate_%s\" % (\n record.attrib.get('section', ''),\n record.attrib.get('id', ''),\n record.getparent().attrib.get('noupdate', '0'),\n )\n all_records.setdefault(record_id, []).append(record)\n # Remove all keys which not duplicated\n records = {}\n for key, items in all_records.items():\n if not len(items) < 2:\n records[key] = items\n return records\n\n def _check_duplicate_xml_record_id(self):\n \"\"\"Check duplicated XML-IDs inside of the files of\n each manifest-section treated them separately\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_records = []\n for fname, section in self._get_manifest_referenced_files().items():\n if os.path.splitext(fname)[1].lower() != '.xml':\n continue\n fname = os.path.join(self.module_path, fname)\n for xml_record in self.get_xml_records(fname):\n xml_record.attrib['section'] = section\n xml_records.append(xml_record)\n for name, fobjs in \\\n self._get_duplicate_xml_record_id(xml_records).items():\n self.msg_args.append((\n \"%s:%d\" % (os.path.relpath(fobjs[0].base, self.module_path),\n fobjs[0].sourceline),\n name,\n ', '.join([os.path.relpath(fobj.base, self.module_path) +\n ':' + str(fobj.sourceline)\n for fobj in fobjs[1:]]),\n ))\n if self.msg_args:\n return False\n return True\n\n def _check_duplicate_id_csv(self):\n \"\"\"Check duplicate xml id in ir.model.access.csv files of a odoo module.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True\n\n def _check_redundant_modulename_xml(self):\n \"\"\"Check redundant module name in xml file.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file_rel in self.filter_files_ext('xml', relpath=True):\n xml_file = os.path.join(self.module_path, xml_file_rel)\n for xml_id, lineno in self.get_xml_redundant_module_name(\n xml_file, self.module):\n self.msg_args.append(\n (\"%s:%d\" % (xml_file_rel, lineno), xml_id))\n if self.msg_args:\n return False\n return True\n\n def _check_character_not_valid_in_resource_link(self):\n \"\"\"The resource in in src/href contains a not valid chararter\"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml'):\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n for name, attr in (('link', 'href'), ('script', 'src')):\n nodes = (doc.xpath('.//%s[@%s]' % (name, attr))\n if not isinstance(doc, string_types) else [])\n for node in nodes:\n resource = node.get(attr, '')\n ext = os.path.splitext(os.path.basename(resource))[1]\n if (resource.startswith('/') and not\n re.search('^[.][a-zA-Z]+$', ext)):\n self.msg_args.append((\"%s:%s\" % (xml_file,\n node.sourceline)))\n if self.msg_args:\n return False\n return True\n\n def _get_duplicate_xml_fields(self, fields):\n \"\"\"Get duplicated xml fields based on attribute name\n :param fields list: List of lxml.etree.Element 
\"<field\"\n :return: Duplicated items.\n e.g. {field.name: [field_node1, field_node2]}\n :rtype: dict\n \"\"\"\n all_fields = {}\n for field in fields:\n field_xml = field.attrib.get('name')\n if not field_xml:\n continue\n all_fields.setdefault(\n (field_xml, field.attrib.get('context'),\n field.attrib.get('filter_domain'),\n field.getparent()), []).append(field)\n # Remove all keys which not duplicated by excluding them from the\n return dict(((name, context, filter_domain, parent_node), nodes) for\n (name, context, filter_domain, parent_node), nodes in\n all_fields.items() if len(nodes) >= 2)\n\n def _check_duplicate_xml_fields(self):\n \"\"\"Check duplicate field in all record of xml files of a odoo module.\n Important note: this check does not work with inherited views.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file)):\n if record.xpath('field[@name=\"inherit_id\"]'):\n continue\n for xpath in ['field', 'field/*/field',\n 'field/*/field/tree/field',\n 'field/*/field/form/field']:\n for name, fobjs in self._get_duplicate_xml_fields(\n record.xpath(xpath)).items():\n self.msg_args.append((\n \"%s:%d\" % (xml_file, fobjs[0].sourceline), name[0],\n ', '.join([str(fobj.sourceline)\n for fobj in fobjs[1:]]),\n ))\n if self.msg_args:\n return False\n return True\n\n def _check_dangerous_filter_wo_user(self):\n \"\"\"Check dangerous filter without a user assigned.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n ir_filter_records = self.get_xml_records(\n os.path.join(self.module_path, xml_file), model='ir.filters')\n for ir_filter_record in ir_filter_records:\n ir_filter_fields = ir_filter_record.xpath(\n \"field[@name='name' or @name='user_id']\")\n # if exists field=\"name\" then is a new record\n # then should be field=\"user_id\" too\n if ir_filter_fields and len(ir_filter_fields) == 1:\n # TODO: Add a list of msg_args before of return\n # TODO: Add source lineno in all xml checks\n self.msg_args = (\n \"%s:%d\" % (xml_file, ir_filter_record.sourceline),\n ir_filter_record.get('id'),)\n return False\n return True\n\n @staticmethod\n def _get_priority(view):\n try:\n priority_node = view.xpath(\"field[@name='priority'][1]\")[0]\n return int(priority_node.get('eval', priority_node.text) or 0)\n except (IndexError, ValueError):\n # IndexError: If the field is not found\n # ValueError: If the value found is not valid integer\n pass\n return 0\n\n @staticmethod\n def _is_replaced_field(view):\n try:\n arch = view.xpath(\"field[@name='arch' and @type='xml'][1]\")[0]\n except IndexError:\n return None\n replaces = \\\n arch.xpath(\".//field[@name='name' and @position='replace'][1]\") + \\\n arch.xpath(\".//xpath[@position='replace'][1]\")\n return bool(replaces)\n\n def _check_dangerous_view_replace_wo_priority(self):\n \"\"\"Check dangerous view defined with low priority\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n views = self.get_xml_records(\n os.path.join(self.module_path, xml_file), model='ir.ui.view')\n for view in views:\n priority = self._get_priority(view)\n is_replaced_field = self._is_replaced_field(view)\n if is_replaced_field and priority < 
self.config.min_priority:\n self.msg_args.append((\n \"%s:%s\" % (xml_file, view.sourceline), priority,\n self.config.min_priority))\n if self.msg_args:\n return False\n return True\n\n def _check_create_user_wo_reset_password(self):\n \"\"\"Check xml records of user without the context\n 'context=\"{'no_reset_password': True}\"'\n This context avoid send email and mail log warning\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n xml_files = self.filter_files_ext('xml')\n for xml_file in xml_files:\n user_records = self.get_xml_records(\n os.path.join(self.module_path, xml_file), model='res.users')\n # if exists field=\"name\" then is a new record\n # then should be context\n self.msg_args.extend([\n (\"%s:%s\" % (xml_file, user_record.sourceline))\n for user_record in user_records\n if user_record.xpath(\"field[@name='name']\") and\n 'no_reset_password' not in (user_record.get('context') or '')])\n if self.msg_args:\n return False\n return True\n\n def _check_javascript_lint(self):\n \"\"\"Check javascript lint\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for js_file_rel in self.filter_files_ext('js', relpath=True):\n js_file = os.path.join(self.module_path, js_file_rel)\n errors = self.check_js_lint(js_file, self.config.jslintrc)\n for error in errors:\n self.msg_args.append((js_file_rel + error,))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_data_xml_node(self):\n \"\"\"Check deprecated <data> xml node inside <odoo> xml node\n :return: False if found <data> xml node inside <odoo> xml node\"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n odoo_nodes = doc.xpath(\"/odoo\") \\\n if not isinstance(doc, string_types) else []\n children, data_node = ((odoo_nodes[0].getchildren(),\n odoo_nodes[0].findall('data'))\n if odoo_nodes else ([], []))\n if len(children) == 1 and len(data_node) == 1:\n lineno = odoo_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True\n\n def _check_deprecated_openerp_xml_node(self):\n \"\"\"Check deprecated <openerp> xml node\n :return: False if exists <openerp> node and\n add list of xml files in self.msg_args\n \"\"\"\n xml_files = self.filter_files_ext('xml')\n self.msg_args = []\n for xml_file in xml_files:\n doc = self.parse_xml(os.path.join(self.module_path, xml_file))\n openerp_nodes = doc.xpath(\"/openerp\") \\\n if not isinstance(doc, string_types) else []\n if openerp_nodes:\n lineno = openerp_nodes[0].sourceline\n self.msg_args.append((\"%s:%s\" % (xml_file, lineno)))\n if self.msg_args:\n return False\n return True\n\n def _check_wrong_tabs_instead_of_spaces(self):\n \"\"\"Check wrong tabs character instead of four spaces.\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n countline = 0\n with open(ext_file, 'rb') as fp:\n for line in fp:\n countline += 1\n line_space_trip = line.lstrip(b' ')\n if line_space_trip != line_space_trip.lstrip(b'\\t'):\n self.msg_args.append(\n (\"%s:%d\" % (ext_file_rel, countline)))\n if self.msg_args:\n return False\n return True\n\n def 
_check_missing_newline_extrafiles(self):\n \"\"\"Check missing newline in other ext files (.xml, .csv, .po)\n :return: False if exists errors and\n add list of errors in self.msg_args\n \"\"\"\n self.msg_args = []\n for type_file in self.config.extfiles_to_lint:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n ext_file = os.path.join(self.module_path, ext_file_rel)\n last_line = ''\n # NOTE: SEEK_END just is supported with 'rb' mode for py3\n with open(ext_file, 'rb') as fp:\n if os.stat(ext_file).st_size > 1:\n fp.seek(-2, os.SEEK_END)\n last_line = fp.readline()\n if not (last_line.endswith(b'\\n') or\n last_line.endswith(b'\\r')):\n self.msg_args.append((ext_file_rel,))\n if self.msg_args:\n return False\n return True\n\n def _get_manifest_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in self.manifest_dict.get(data_type) or []:\n referenced_files[fname] = data_type\n return referenced_files\n\n def _get_xml_referenced_files(self):\n referenced_files = {}\n for data_type in DFTL_MANIFEST_DATA_KEYS:\n for fname in self.manifest_dict.get(data_type) or []:\n if not fname.endswith('.xml'):\n continue\n referenced_files.update(\n self._get_xml_referenced_files_report(fname, data_type)\n )\n return referenced_files\n\n def _get_xml_referenced_files_report(self, fname, data_type):\n return {\n # those files are relative to the addon path\n os.path.join(\n *record.attrib[attribute].split(os.sep)[1:]\n ): data_type\n for attribute in ['xml', 'xsl']\n for record in self.parse_xml(\n os.path.join(self.module_path, fname)\n )\n .xpath('//report[@%s]' % attribute)\n }\n\n def _get_module_files(self):\n module_files = []\n for type_file in self.config.extfiles_convert:\n for ext_file_rel in self.filter_files_ext(type_file, relpath=True):\n module_files.append(ext_file_rel)\n return module_files\n\n def _check_file_not_used(self):\n \"\"\"Check if a file is not used from manifest\"\"\"\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files\n\n def _check_xml_attribute_translatable(self):\n \"\"\"The xml attribute is missing the translation=\"off\" tag\n Example <attribute name=\"groups\">sale.group</attribute>\n \"\"\"\n if (self.linter._all_options['valid_odoo_versions'].config\n .valid_odoo_versions != ['8.0']):\n return True\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file), None,\n '//attribute[not(@name=\"string\") and not(@translation)]'):\n self.msg_args.append(\n (\"%s:%d\" % (xml_file, record.sourceline), 'xml_id'))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_tree_attribute(self):\n \"\"\"The tree-view declaration is using a deprecated attribute.\n Example <tree string=\"Partners\"></tree>\n \"\"\"\n checks = [\n {\n 'attr': 'colors',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},\n 'xpath': './/tree[@colors]',\n },\n {\n 'attr': 'fonts',\n 'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},\n 'xpath': './/tree[@fonts]',\n },\n {\n 'attr': 'string',\n 'skip_versions': {'4.2', '5.0', 
'6.0', '6.1', '7.0'},\n 'xpath': './/tree[@string]',\n },\n ]\n valid_versions = set(\n self.linter._all_options['valid_odoo_versions'].config\n .valid_odoo_versions)\n\n applicable_checks = [check for check in checks if (\n check['attr'] in self.config.deprecated_tree_attributes and\n bool(valid_versions - check['skip_versions']))]\n\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=True):\n for record in self.get_xml_records(\n os.path.join(self.module_path, xml_file),\n model='ir.ui.view'):\n\n for check in applicable_checks:\n if record.xpath(check['xpath']):\n self.msg_args.append((\n '%s:%d' % (xml_file, record.sourceline),\n check['attr']))\n if self.msg_args:\n return False\n return True\n\n def _check_xml_deprecated_qweb_directive(self):\n \"\"\"Check for use of deprecated QWeb directives t-*-options.\n :return: False if deprecated directives are found, in which case\n self.msg_args will contain the error messages.\n \"\"\"\n valid_versions = set(self.linter._all_options[\n 'valid_odoo_versions'].config.valid_odoo_versions)\n if not valid_versions & {'10.0', '11.0'}:\n return True\n\n deprecated_directives = {\n 't-esc-options',\n 't-field-options',\n 't-raw-options',\n }\n directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)\n xpath = '|'.join(\n '/%s//template//*[%s]' % (tag, directive_attrs)\n for tag in ('odoo', 'openerp')\n )\n\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=False):\n doc = self.parse_xml(xml_file)\n if isinstance(doc, string_types):\n continue\n for node in doc.xpath(xpath):\n # Find which directive was used exactly.\n directive = next(\n iter(set(node.attrib) & deprecated_directives))\n self.msg_args.append((\n '%s:%d' % (xml_file, node.sourceline), directive))\n return not bool(self.msg_args)\n",
"step-ids": [
24,
28,
33,
42,
46
]
}
|
[
24,
28,
33,
42,
46
] |
from end import Client
c = Client()
|
normal
|
{
"blob_id": "1be510e6715d21e814c48fe05496704e9a65d554",
"index": 308,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nc = Client()\n",
"step-3": "from end import Client\nc = Client()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import datetime
import json
from dateutil import parser
import mock
from python_http_client.exceptions import ForbiddenError
from rdr_service import clock, config
from rdr_service.api_util import open_cloud_file
from rdr_service.clock import FakeClock
from rdr_service.dao.database_utils import format_datetime
from rdr_service.dao.genomics_dao import GenomicGcDataFileDao, GenomicGCValidationMetricsDao, GenomicIncidentDao, \
GenomicSetMemberDao, UserEventMetricsDao, GenomicJobRunDao, GenomicResultWithdrawalsDao, \
GenomicMemberReportStateDao, GenomicAppointmentEventMetricsDao, GenomicAppointmentEventDao, GenomicResultViewedDao, \
GenomicInformingLoopDao, GenomicAppointmentEventNotifiedDao, GenomicDefaultBaseDao
from rdr_service.dao.message_broker_dao import MessageBrokenEventDataDao
from rdr_service.genomic_enums import GenomicIncidentCode, GenomicJob, GenomicWorkflowState, GenomicSubProcessResult, \
GenomicSubProcessStatus, GenomicManifestTypes, GenomicQcStatus, GenomicReportState
from rdr_service.genomic.genomic_job_components import GenomicFileIngester
from rdr_service.genomic.genomic_job_controller import GenomicJobController
from rdr_service.model.genomics import GenomicGcDataFile, GenomicIncident, GenomicSetMember, GenomicGCValidationMetrics,\
GenomicGCROutreachEscalationNotified
from rdr_service.offline.genomics import genomic_pipeline, genomic_cvl_pipeline
from rdr_service.participant_enums import WithdrawalStatus
from tests import test_data
from tests.genomics_tests.test_genomic_utils import create_ingestion_test_file
from tests.helpers.unittest_base import BaseTestCase
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
def test_incident_with_long_message(self):
"""Make sure the length of incident messages doesn't cause issues when recording them"""
incident_message = "1" * (GenomicIncident.message.type.length + 20)
mock_slack_handler = mock.MagicMock()
job_controller = GenomicJobController(job_id=1)
job_controller.genomic_alert_slack = mock_slack_handler
job_controller.create_incident(message=incident_message, slack=True)
# Double check that the incident was saved successfully, with part of the message
incident: GenomicIncident = self.session.query(GenomicIncident).one()
self.assertTrue(incident_message.startswith(incident.message))
# Make sure Slack received the full message
mock_slack_handler.send_message_to_webhook.assert_called_with(
message_data={
'text': incident_message
}
)
def test_gvcf_files_ingestion(self):
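        """Ingest gvcf and gvcf md5sum file paths and verify they are stored on the member's GC validation metrics"""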
job_controller = GenomicJobController(job_id=38)
bucket_name = "test_bucket"
file_path = "Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz"
file_path_md5 = "Wgs_sample_raw_data/SS_VCF_research/" \
"BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum"
full_path = f'{bucket_name}/{file_path}'
full_path_md5 = f'{bucket_name}/{file_path_md5}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=gen_job_run.id,
startTime=clock.CLOCK.now(),
filePath='/test_file_path',
bucketName='test_bucket',
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
job_controller.ingest_data_files_into_gc_metrics(file_path_md5, bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfMd5Path)
self.assertEqual(metrics.gvcfMd5Path, full_path_md5)
job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfPath)
self.assertEqual(metrics.gvcfPath, full_path)
def test_gvcf_files_ingestion_create_incident(self):
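        """When no metrics record matches the sample id parsed from the file path, an UNABLE_TO_FIND_METRIC incident is created"""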
bucket_name = "test_bucket"
file_path = "Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz"
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="111111111",
sampleId="222222222222",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=gen_job_run.id,
startTime=clock.CLOCK.now(),
filePath='/test_file_path',
bucketName=bucket_name,
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:
controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)
incident = self.incident_dao.get(1)
self.assertIsNotNone(incident)
self.assertEqual(incident.code, GenomicIncidentCode.UNABLE_TO_FIND_METRIC.name)
self.assertEqual(incident.data_file_path, file_path)
self.assertEqual(incident.message, 'INGEST_DATA_FILES: Cannot find '
'genomics metric record for sample id: '
'21042005280')
def test_accession_data_files(self):
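        """Accession idat, vcf and cram data files and verify the resulting GenomicGcDataFile records"""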
test_bucket_baylor = "fake-data-bucket-baylor"
test_idat_file = "fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat"
test_vcf_file = "fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz"
test_cram_file = "fake-data-bucket-baylor/Wgs_sample_raw_data/" \
"CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram"
test_files = [test_idat_file, test_vcf_file, test_cram_file]
test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)
# run job controller method on each file
with clock.FakeClock(test_time):
for file_path in test_files:
with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES) as controller:
controller.accession_data_files(file_path, test_bucket_baylor)
inserted_files = self.data_file_dao.get_all()
# idat
expected_idat = GenomicGcDataFile(
id=1,
created=test_time,
modified=test_time,
file_path=test_idat_file,
gc_site_id='jh',
bucket_name='fake-data-bucket-baylor',
file_prefix='Genotyping_sample_raw_data',
file_name='204027270091_R02C01_Grn.idat',
file_type='Grn.idat',
identifier_type='chipwellbarcode',
identifier_value='204027270091_R02C01',
ignore_flag=0,
)
# vcf
expected_vcf = GenomicGcDataFile(
id=2,
created=test_time,
modified=test_time,
file_path=test_vcf_file,
gc_site_id='jh',
bucket_name='fake-data-bucket-baylor',
file_prefix='Genotyping_sample_raw_data',
file_name='204027270091_R02C01.vcf.gz',
file_type='vcf.gz',
identifier_type='chipwellbarcode',
identifier_value='204027270091_R02C01',
ignore_flag=0,
)
# cram
expected_cram = GenomicGcDataFile(
id=3,
created=test_time,
modified=test_time,
file_path=test_cram_file,
gc_site_id='bcm',
bucket_name='fake-data-bucket-baylor',
file_prefix='Wgs_sample_raw_data/CRAMs_CRAIs',
file_name='BCM_A100134256_21063006771_SIA0017196_1.cram',
file_type='cram',
identifier_type='sample_id',
identifier_value='21063006771',
ignore_flag=0,
)
# obj mapping
expected_objs = {
0: expected_idat,
1: expected_vcf,
2: expected_cram
}
# verify test objects match expectations
for i in range(3):
self.assertEqual(expected_objs[i].bucket_name, inserted_files[i].bucket_name)
self.assertEqual(expected_objs[i].created, inserted_files[i].created)
self.assertEqual(expected_objs[i].file_name, inserted_files[i].file_name)
self.assertEqual(expected_objs[i].file_path, inserted_files[i].file_path)
self.assertEqual(expected_objs[i].file_prefix, inserted_files[i].file_prefix)
self.assertEqual(expected_objs[i].file_type, inserted_files[i].file_type)
self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i].gc_site_id)
self.assertEqual(expected_objs[i].id, inserted_files[i].id)
self.assertEqual(expected_objs[i].identifier_type, inserted_files[i].identifier_type)
self.assertEqual(expected_objs[i].identifier_value, inserted_files[i].identifier_value)
self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i].ignore_flag)
self.assertEqual(expected_objs[i].metadata, inserted_files[i].metadata)
self.assertEqual(expected_objs[i].modified, inserted_files[i].modified)
def test_updating_members_blocklists(self):
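        """Members matching the blocklist config (AIAN participants, test genome types) get research/results blocks; other members are left untouched"""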
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
ids_should_be_updated = []
# for just created and wf state query and MATCHES criteria
for i in range(4):
ids_should_be_updated.append(
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0,
ai_an='Y' if i & 2 == 0 else 'N'
).id
)
# for just created and wf state query and DOES NOT MATCH criteria
for i in range(2):
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='aou_array',
genomicWorkflowState=GenomicWorkflowState.AW0,
ai_an='N'
)
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:
controller.update_members_blocklists()
# current config json in base_config.json
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.blockResearch == 1, created_members))
        self.assertEqual(sorted(ids_should_be_updated), sorted(obj.id for obj in blocklisted))
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'
for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)
)
# should NOT be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)
)
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'
for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# should be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'
for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# should NOT be RESEARCH/RESULTS blocked
self.assertTrue(all(
obj.blockResearch == 0 and obj.blockResearchReason is None
for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# clear current set member records
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
# for modified data query and MATCHES criteria
for i in range(4):
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N'
)
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'
for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)
)
# should NOT be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)
)
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'
for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1)
)
# should be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'
for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1)
)
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
def test_ingest_user_metrics_file(self):
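        """Ingest a user event metrics CSV and verify each participant's rows are stored with the current job run id"""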
test_file = 'Genomic-Metrics-File-User-Events-Test.csv'
bucket_name = 'test_bucket'
sub_folder = 'user_events'
pids = []
file_ingester = GenomicFileIngester()
for _ in range(2):
pid = self.data_generator.create_database_participant()
pids.append(pid.participantId)
test_metrics_file = create_ingestion_test_file(
test_file,
bucket_name,
sub_folder)
test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'
with open_cloud_file(test_file_path) as csv_file:
metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)
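        # Keep the raw CSV rows so the ingested metrics can be compared per participant below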
with GenomicJobController(GenomicJob.METRICS_FILE_INGEST) as controller:
controller.ingest_metrics_file(
metric_type='user_events',
file_path=test_file_path,
)
job_run_id = controller.job_run.id
metrics = self.user_event_metrics_dao.get_all()
for pid in pids:
file_metrics = list(filter(lambda x: int(x['participant_id'].split('P')[-1]) == pid, metrics_to_ingest[
'rows']))
participant_ingested_metrics = list(filter(lambda x: x.participant_id == pid, metrics))
self.assertEqual(len(file_metrics), len(participant_ingested_metrics))
self.assertTrue(all(obj.run_id == job_run_id for obj in participant_ingested_metrics))
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_reconcile_pdr_data(self, mock_cloud_task):
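        """Reconciling PDR data should queue a rebuild cloud task for each genomic table that has new records"""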
# init new job run in __enter__
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.model_type.__tablename__)
self.assertTrue(type(call_args[0].args[0]['ids']) is list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
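        # Create a second batch of genomic records ten minutes later; the next reconcile run should pick these up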
with FakeClock(plus_ten):
for i in range(2):
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=first_run[0].id,
startTime=clock.CLOCK.now(),
filePath=f'test_file_path_{i}',
bucketName='test_bucket',
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
manifest = self.data_generator.create_database_genomic_manifest_file(
manifestTypeId=2,
filePath=f'test_file_path_{i}'
)
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id,
feedbackRecordCount=2
)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId,
event_name='test_event',
run_id=1,
)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1,
event_type='informing_loop_decision',
module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later',
event_authored_time=clock.CLOCK.now()
)
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co',
email_notification_sent=0,
sample_id='sample_test',
results_type='hdr',
genomic_set_member_id=gen_member.id
)
self.data_generator.create_database_genomic_appointment(
message_record_id=i,
appointment_id=i,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(),
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id,
participant_id=participant.participantId,
module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now()
)
self.data_generator.create_genomic_result_viewed(
participant_id=participant.participantId,
event_type='result_viewed',
event_authored_time=clock.CLOCK.now(),
module_type='gem',
sample_id=gen_member.sampleId
)
# gets new records that were created with last job run from above
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = [
'genomic_set',
'genomic_set_member',
'genomic_job_run',
'genomic_file_processed',
'genomic_gc_validation_metrics',
'genomic_manifest_file',
'genomic_manifest_feedback',
'genomic_informing_loop',
'genomic_cvl_results_past_due',
'user_event_metrics',
'genomic_member_report_state',
'genomic_result_viewed',
'genomic_appointment_event'
]
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = set([obj[0][0]['table'] for obj in call_args])
mock_endpoint = [obj[0][1] for obj in call_args]
        self.assertEqual(sorted(mock_tables), sorted(affected_tables))
self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):
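        """Manifests whose raw records are missing ingested data (deltas) are re-queued for ingestion via cloud tasks"""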
bucket_name = "test-bucket"
aw1_file_name = "AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv"
aw1_manifest_path = f"{bucket_name}/{aw1_file_name}"
aw2_file_name = "AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv"
aw2_manifest_path = f"{bucket_name}/{aw2_file_name}"
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
# Create AW1 job_run
aw1_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# Create AW2 job_run
aw2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_INGESTION,
startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# should have no data
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(3)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
# Create genomic_aw1_raw record
self.data_generator.create_database_genomic_aw1_raw(
file_path=aw1_manifest_path,
package_id="PKG-2104-026571",
biobank_id="A10001",
)
# Create genomic_aw2_raw record
self.data_generator.create_database_genomic_aw2_raw(
file_path=aw2_manifest_path,
biobank_id="A10001",
sample_id="100001",
biobankidsampleid="A10001_100001",
)
# Create AW1 genomic_manifest_file record
aw1_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW1,
filePath=aw1_manifest_path,
fileName=aw1_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW2 genomic_manifest_file record
aw2_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW2,
filePath=aw2_manifest_path,
fileName=aw2_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW1 file_processed
aw1_file_processed = self.data_generator.create_database_genomic_file_processed(
runId=aw1_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw1_manifest_file.id,
filePath=f"/{aw1_manifest_path}",
bucketName=bucket_name,
fileName=aw1_file_name,
)
# Create AW2 file_processed
aw2_file_processed = self.data_generator.create_database_genomic_file_processed(
runId=aw2_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw2_manifest_file.id,
filePath=f"/{aw2_manifest_path}",
bucketName=bucket_name,
fileName=aw2_file_name,
)
# genomic_set_member for AW1
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1,
aw1FileProcessedId=aw1_file_processed.id
)
# genomic_gc_validation_metrics for AW1
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=aw2_file_processed.id
)
# one AW1/AW2 with no deltas
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(4)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
# empty tables resulting in deltas and cloud task calls
with self.member_dao.session() as session:
session.query(GenomicGCValidationMetrics).delete()
session.query(GenomicSetMember).delete()
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(5)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)
# one AW1/AW2 with deltas
self.assertEqual(mock_cloud_task.call_count, 2)
self.assertTrue(mock_cloud_task.call_count)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 2)
cloud_task_endpoint = ['ingest_aw1_manifest_task', 'ingest_aw2_manifest_task']
mock_endpoint = [obj[0][1] for obj in call_args]
self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))
mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])
        self.assertEqual(len(mock_buckets), 1)
self.assertTrue(list(mock_buckets)[0] == bucket_name)
def test_calculate_informing_loop_ready_flags(self):
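        """The informing loop ready flag is set in batches limited by the CALCULATE_READY_FLAG_LIMIT config value"""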
num_participants = 4
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
for num in range(num_participants):
plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)
plus_num = plus_num.replace(microsecond=0)
with FakeClock(plus_num):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
stored_sample = self.data_generator.create_database_biobank_stored_sample(
biobankId=summary.biobankId,
biobankOrderIdentifier=self.fake.pyint()
)
collection_site = self.data_generator.create_database_site(
siteType='Clinic'
)
order = self.data_generator.create_database_biobank_order(
collectedSiteId=collection_site.siteId,
participantId=summary.participantId,
finalizedTime=plus_num
)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId,
system="1",
)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId,
system="2",
)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS,
qcStatus=GenomicQcStatus.PASS,
gcManifestSampleSource='Whole Blood',
collectionTubeId=stored_sample.biobankStoredSampleId
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=member.id,
sexConcordance='True',
drcFpConcordance='Pass',
drcSexConcordance='Pass',
processingStatus='Pass'
)
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants)
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is None for obj in current_set_members))
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
# no config object, controller method should return
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants)
calculation_limit = 2
config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [calculation_limit])
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in current_set_members))
self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))
current_loops_set = [obj for obj in current_set_members if obj.informingLoopReadyFlag == 1
and obj.informingLoopReadyFlagModified is not None]
self.assertEqual(len(current_loops_set), calculation_limit)
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants // 2)
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), 0)
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_getting_results_withdrawn(self, email_mock):
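"""Withdrawn participants with results should be recorded and notification emails sent for both GEM and HEALTH pipelines."""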
num_participants = 4
result_withdrawal_dao = GenomicResultWithdrawalsDao()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
pids = []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT
)
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY,
gemA1ManifestJobRunId=gen_job_run.id if num % 2 == 0 else None
)
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS,
cvlW1ilHdrJobRunId=gen_job_run.id
)
pids.append(summary.participantId)
config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL, '[email protected]')
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:
controller.check_results_withdrawals()
# two notification emails expected: one GEM and one HEALTH
self.assertEqual(email_mock.call_count, 2)
call_args = email_mock.call_args_list
self.assertTrue(any('GEM' in call.args[0].subject for call in call_args))
self.assertTrue(any('HEALTH' in call.args[0].subject for call in call_args))
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
all_withdrawal_records = result_withdrawal_dao.get_all()
self.assertTrue(len(all_withdrawal_records) == len(pids))
self.assertTrue(all(obj.participant_id in pids for obj in all_withdrawal_records))
array_results = list(filter(lambda x: x.array_results == 1, all_withdrawal_records))
# should only be 2: the array members given a GEM A1 job run
self.assertEqual(len(array_results), 2)
cvl_results = list(filter(lambda x: x.cvl_results == 1, all_withdrawal_records))
# should be 4, one per participant, since every WGS member has a CVL W1IL job run
self.assertEqual(len(cvl_results), num_participants)
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:
controller.check_results_withdrawals()
# email count should still be two since no new withdrawal records exist
self.assertEqual(email_mock.call_count, 2)
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
def test_gem_results_to_report_state(self):
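"""GEM A2 results should be converted to member report states exactly once."""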
num_participants = 8
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gem_a2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.GEM_A2_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
pids_to_update, member_ids = [], []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT
)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY
)
if num % 2 == 0:
member_ids.append(member.id)
pids_to_update.append(summary.participantId)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 2)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[0]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
current_members = self.member_dao.get_all()
# the 4 members updated below should be returned on the next run
for member in current_members:
if member.participantId in pids_to_update:
member.gemA2ManifestJobRunId = gem_a2_job_run.id
member.genomicWorkflowState = GenomicWorkflowState.GEM_RPT_READY
self.member_dao.update(member)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 3)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[1]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
current_gem_report_states = self.report_state_dao.get_all()
self.assertEqual(len(current_gem_report_states), len(pids_to_update))
self.assertTrue(all(obj.event_type == 'result_ready' for obj in current_gem_report_states))
self.assertTrue(all(obj.event_authored_time is not None for obj in current_gem_report_states))
self.assertTrue(all(obj.module == 'gem' for obj in current_gem_report_states))
self.assertTrue(
all(obj.genomic_report_state == GenomicReportState.GEM_RPT_READY for obj in current_gem_report_states)
)
self.assertTrue(
all(obj.genomic_report_state_str == GenomicReportState.GEM_RPT_READY.name for obj in
current_gem_report_states)
)
self.assertTrue(
all(obj.genomic_set_member_id in member_ids for obj in
current_gem_report_states)
)
# members whose report states were already inserted should not be returned again
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 4)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[2]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
self.clear_table_after_test('genomic_member_report_state')
def test_reconcile_informing_loop(self):
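"""Informing loop decisions found only in user event metrics should be reconciled into the informing loop table."""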
event_dao = UserEventMetricsDao()
event_dao.truncate() # for test suite
il_dao = GenomicInformingLoopDao()
for pid in range(8):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# insert set members
for b in ["aou_array", "aou_wgs"]:
for i in range(1, 9):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType=b,
)
# Set up ingested metrics data
events = ['gem.informing_loop.started',
'gem.informing_loop.screen8_no',
'gem.informing_loop.screen8_yes',
'hdr.informing_loop.started',
'gem.informing_loop.screen3',
'pgx.informing_loop.screen8_no',
'hdr.informing_loop.screen10_no']
for p in range(4):
for i in range(len(events)):
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
participant_id=p + 1,
created_at=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i),
event_name=events[i],
run_id=1,
ignore_flag=0,
)
# Set up informing loop from message broker records
decisions = [None, 'no', 'yes']
for p in range(3):
for i in range(2):
self.data_generator.create_database_genomic_informing_loop(
message_record_id=i,
event_type='informing_loop_started' if i == 0 else 'informing_loop_decision',
module_type='gem',
participant_id=p + 1,
decision_value=decisions[i],
sample_id=100 + p,
event_authored_time=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i)
)
# Participant 6 has a user event but no message broker record
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
participant_id=6,
created_at=datetime.datetime(2021, 12, 29, 00),
event_name='gem.informing_loop.screen8_yes',
run_id=1,
ignore_flag=0,
)
# Run reconcile job
genomic_pipeline.reconcile_informing_loop_responses()
# Test mismatched GEM data ingested correctly
pid_list = [1, 2, 3, 6]
new_il_values = il_dao.get_latest_il_for_pids(
pid_list=pid_list,
module="gem"
)
for value in new_il_values:
self.assertEqual("yes", value.decision_value)
pid_list = [1, 2, 3, 4]
for module in ["hdr", "pgx"]:
new_il_values = il_dao.get_latest_il_for_pids(
pid_list=pid_list,
module=module
)
for value in new_il_values:
self.assertEqual("no", value.decision_value)
self.assertIsNotNone(value.created_from_metric_id)
def test_reconcile_message_broker_results_ready(self):
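"""PGx and HDR result_ready user events should produce the corresponding member report states."""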
# Create Test Participants' data
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# insert set members and event metrics records
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType="aou_wgs",
)
# 3 PGX records
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="pgx.result_ready",
run_id=1,
)
# 1 HDR Positive
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.result_ready.informative",
run_id=1,
)
# 1 HDR uninformative
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.result_ready.uninformative",
run_id=1,
)
# Run job
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
# Test correct data inserted
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == "pgx_v1"]
hdr_record_uninf = [rec for rec in states
if rec.genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0]
hdr_record_pos = [rec for rec in states
if rec.genomic_report_state == GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.genomic_report_state)
self.assertEqual("PGX_RPT_READY", pgx_record.genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.participant_id + 10)
self.assertEqual("result_ready", pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), pgx_record.event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual("HDR_RPT_UNINFORMATIVE", hdr_record_uninf.genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.participant_id + 10)
self.assertEqual("result_ready", hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual("HDR_RPT_POSITIVE", hdr_record_pos.genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.participant_id + 10)
self.assertEqual("result_ready", hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_pos.event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
def test_reconcile_message_broker_results_viewed(self):
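"""PGx and HDR opened_at user events should produce genomic result viewed records."""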
# Create Test Participants' data
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
for pid in range(3):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# insert set members and event metrics records
for i in range(1, 3):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType="aou_wgs",
)
# 1 PGX Viewed
if i == 1:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="pgx.opened_at",
run_id=1,
)
# 1 HDR Viewed
if i == 2:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.opened_at",
run_id=1,
)
genomic_cvl_pipeline.reconcile_message_broker_results_viewed()
# Test correct data inserted
result_viewed_dao = GenomicResultViewedDao()
results = result_viewed_dao.get_all()
self.assertEqual(2, len(results))
for record in results:
if record.participant_id == 1:
self.assertEqual("pgx_v1", record.module_type)
else:
self.assertEqual("hdr_v1", record.module_type)
self.assertEqual(int(record.sample_id), record.participant_id + 10)
self.assertEqual("result_viewed", record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), record.first_viewed)
self.assertIsNotNone(record.created_from_metric_id)
def test_ingest_appointment_metrics_file(self):
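"""Appointment event metrics should be ingested from a JSON file in the cloud bucket."""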
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
"Genomic-Metrics-File-Appointment-Events-Test.json")
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode("utf-8"))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST) as controller:
controller.ingest_appointment_metrics_file(
file_path=test_file_path,
)
all_metrics = self.appointment_metrics_dao.get_all()
# should be 5 metric records, matching what's in the JSON file
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all((obj.participant_id in pids for obj in all_metrics)))
self.assertTrue(all((obj.file_path == test_file_path for obj in all_metrics)))
self.assertTrue(all((obj.appointment_event is not None for obj in all_metrics)))
self.assertTrue(all((obj.created is not None for obj in all_metrics)))
self.assertTrue(all((obj.modified is not None for obj in all_metrics)))
self.assertTrue(all((obj.module_type is not None for obj in all_metrics)))
self.assertTrue(all((obj.event_authored_time is not None for obj in all_metrics)))
self.assertTrue(all((obj.event_type is not None for obj in all_metrics)))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
def test_reconcile_appointments_with_metrics(self):
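"""Appointment events missing from the event table should be created from ingested metrics."""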
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {
"event": "appointment_updated",
"eventAuthoredTime": "2022-09-16T17:18:38Z",
"participantId": f'P{summary.participantId}',
"messageBody": {
"module_type": "hdr",
"appointment_timestamp": "2022-09-19T19:30:00+00:00",
"id": 55,
"appointment_timezone": "America/Los_Angeles",
"location": "CA",
"contact_number": "18043704252",
"language": "en",
"source": "Color"
}
}
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num,
appointment_id=num,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId,
appointment_event=json.dumps(missing_json, indent=4) if num % 2 != 0 else 'foo',
file_path='test_file_path',
module_type='hdr',
event_authored_time=fake_date,
event_type='appointment_updated' if num % 2 != 0 else 'appointment_scheduled'
)
current_events = self.appointment_event_dao.get_all()
# should be 2 initial appointment events
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
# should be 4 initial appointment metrics
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
# should be 4 appointment events: 2 initial + 2 created from metrics
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type == 'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in scheduled))
updated = list(filter(lambda x: x.event_type == 'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in updated))
current_metrics = self.appointment_metrics_dao.get_all()
# should still be 4 appointment metrics, now marked as reconciled
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
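"""Participants with scheduled appointments whose GROR consent changed should trigger a notification email once."""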
fake_date = parser.parse("2022-09-01T13:43:23")
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, ['[email protected]'])
num_participants = 4
for num in range(num_participants):
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=gror
)
self.data_generator.create_database_genomic_appointment(
message_record_id=num,
appointment_id=num,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
# already-notified participants should not be returned by the query
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=2
)
self.data_generator.create_database_genomic_appointment(
message_record_id=5,
appointment_id=5,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()
self.assertEqual(1, len(changed_ppts))
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
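"""HDR positive participants without a completed or upcoming appointment after 14 days should trigger escalation emails."""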
fake_date = parser.parse("2022-09-01T13:43:23")
fake_date2 = parser.parse("2022-09-02T14:14:00")
fake_date3 = parser.parse("2022-09-03T15:15:00")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
# Appointment scheduled in future: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=101,
appointment_id=102,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[0],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment completed: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment scheduled then canceled: notify
self.data_generator.create_database_genomic_appointment(
message_record_id=103,
appointment_id=104,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date2,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment(
message_record_id=104,
appointment_id=104,
event_type='appointment_cancelled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date3,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{
'participant_id': pids[4],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': True
},{
'participant_id': pids[5],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': False
}])
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(num_days=14)
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation_error(self, email_mock):
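"""A failed escalation email should be reported to Slack and recorded with message_sent=False."""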
email_mock.side_effect = ForbiddenError(mock.Mock(code=403))
mock_slack_handler = mock.MagicMock()
fake_date = parser.parse("2023-06-01T13:43:23")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(2):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:
controller.genomic_alert_slack = mock_slack_handler
controller.check_gcr_escalation(controller.job_id)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
with notified_dao.session() as session:
notification = session.query(
GenomicGCROutreachEscalationNotified
).filter(
GenomicGCROutreachEscalationNotified.participant_id == pids[0]
).one()
self.assertEqual(email_mock.call_count, 1)
self.assertEqual(mock_slack_handler.send_message_to_webhook.call_count, 1)
self.assertEqual(False, notification.message_sent)
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_ce_escalation(self, email_mock):
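"""CareEvolution-origin participants should follow the 30-day GCR escalation path."""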
fake_date = parser.parse("2022-09-01T13:43:23")
fake_date2 = parser.parse("2022-09-02T14:14:00")
fake_date3 = parser.parse("2022-09-03T15:15:00")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
participantOrigin='careevolution'
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
# Appointment scheduled in future: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=101,
appointment_id=102,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[0],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment completed: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment scheduled then canceled: notify
self.data_generator.create_database_genomic_appointment(
message_record_id=103,
appointment_id=104,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date2,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment(
message_record_id=104,
appointment_id=104,
event_type='appointment_cancelled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date3,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{
'participant_id': pids[4],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': True
},{
'participant_id': pids[5],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': False
}])
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(
num_days=30,
participant_origin='careevolution'
)
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 30 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
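"""The manifest generation cloud task should only be queued when the previous job run succeeded."""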
with GenomicJobController(
GenomicJob.PR_PR_WORKFLOW
) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR)
# task SHOULD NOT be called
self.assertEqual(cloud_task_mock.called, False)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(
GenomicJob.PR_PR_WORKFLOW
) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.SUCCESS)
# task SHOULD be called
self.assertEqual(cloud_task_mock.called, True)
self.assertTrue(cloud_task_mock.call_args[1].get('payload').get('manifest_type') == 'p0')
self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') == 'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.SUCCESS, GenomicSubProcessResult.ERROR] for obj
in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in all_job_runs))
obj.blockResultsReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n run_result = self.job_run_dao.get(2)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n file_ingester = GenomicFileIngester()\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n test_metrics_file = create_ingestion_test_file(test_file,\n bucket_name, sub_folder)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_metrics_file(metric_type='user_events',\n file_path=test_file_path)\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].\n split('P')[-1]) == pid, metrics_to_ingest['rows']))\n participant_ingested_metrics = list(filter(lambda x: x.\n participant_id == pid, metrics))\n self.assertEqual(len(file_metrics), len(\n participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in\n participant_ingested_metrics))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_reconcile_pdr_data(self, mock_cloud_task):\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n first_run = self.job_run_dao.get_all()\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.\n model_type.__tablename__)\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in\n first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n participant = self.data_generator.create_database_participant()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1))\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=first_run[\n 0].id, startTime=clock.CLOCK.now(), filePath=\n f'test_file_path_{i}', bucketName='test_bucket',\n fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id)\n manifest = (self.data_generator.\n create_database_genomic_manifest_file(manifestTypeId=2,\n filePath=f'test_file_path_{i}'))\n 
self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id, feedbackRecordCount=2)\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId, event_name=\n 'test_event', run_id=1)\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1, event_type=\n 'informing_loop_decision', module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later', event_authored_time=clock\n .CLOCK.now())\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co', email_notification_sent=0, sample_id=\n 'sample_test', results_type='hdr',\n genomic_set_member_id=gen_member.id)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i, appointment_id=i, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(), source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id, participant_id=\n participant.participantId, module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now())\n self.data_generator.create_genomic_result_viewed(participant_id\n =participant.participantId, event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(), module_type=\n 'gem', sample_id=gen_member.sampleId)\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n affected_tables = ['genomic_set', 'genomic_set_member',\n 'genomic_job_run', 'genomic_file_processed',\n 'genomic_gc_validation_metrics', 'genomic_manifest_file',\n 'genomic_manifest_feedback', 'genomic_informing_loop',\n 'genomic_cvl_results_past_due', 'user_event_metrics',\n 'genomic_member_report_state', 'genomic_result_viewed',\n 'genomic_appointment_event']\n num_calls = len(affected_tables) + 1\n self.assertEqual(mock_cloud_task.call_count, num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_reconcile_message_broker_results_ready(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.result_ready', run_id=1)\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n 
participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.informative', run_id=1)\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.uninformative',\n run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n self.assertEqual(5, len(states))\n pgx_records = [rec for rec in states if rec.module == 'pgx_v1']\n hdr_record_uninf = [rec for rec in states if rec.\n genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0\n ]\n hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==\n GenomicReportState.HDR_RPT_POSITIVE][0]\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.\n genomic_report_state)\n self.assertEqual('PGX_RPT_READY', pgx_record.\n genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.\n participant_id + 10)\n self.assertEqual('result_ready', pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.\n event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0),\n hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.\n event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n <mask token>\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n appointment_data = test_data.load_test_data_json(\n 'Genomic-Metrics-File-Appointment-Events-Test.json')\n appointment_data_str = json.dumps(appointment_data, indent=4)\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode('utf-8'))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_appointment_metrics_file(file_path=test_file_path\n )\n all_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))\n self.assertTrue(all(obj.file_path == test_file_path for obj in\n all_metrics))\n self.assertTrue(all(obj.appointment_event is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.created is not None for obj in all_metrics))\n self.assertTrue(all(obj.modified is not None for obj in all_metrics))\n self.assertTrue(all(obj.module_type is not None for obj in all_metrics)\n )\n self.assertTrue(all(obj.event_authored_time 
is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.event_type is not None for obj in all_metrics))\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.\n APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = 
parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n '[email protected]'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n '[email protected]'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n 
message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n <mask token>\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, 
True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
"step-3": "<mask token>\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = '1' * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={'text': incident_message})\n <mask token>\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='111111111', sampleId=\n '222222222222', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName=bucket_name, fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.\n UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message,\n 'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'\n )\n <mask token>\n\n def test_updating_members_blocklists(self):\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n ids_should_be_updated = []\n for i in range(4):\n ids_should_be_updated.append(self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set.id,\n biobankId='100153482', sampleId='21042005280', genomeType=\n 'test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if\n i & 2 == 0 else 'N').id)\n for i in range(2):\n 
self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_array', genomicWorkflowState=\n GenomicWorkflowState.AW0, ai_an='N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n created_members = self.member_dao.get_all()\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.\n blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in\n blocklisted].sort())\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in created_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 0 and obj.\n blockResearchReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n run_result = self.job_run_dao.get(1)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='test_investigation_one' if i & 2 != 0 else\n 'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n modified_members = self.member_dao.get_all()\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in modified_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and 
obj.blockResultsReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n run_result = self.job_run_dao.get(2)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n file_ingester = GenomicFileIngester()\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n test_metrics_file = create_ingestion_test_file(test_file,\n bucket_name, sub_folder)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_metrics_file(metric_type='user_events',\n file_path=test_file_path)\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].\n split('P')[-1]) == pid, metrics_to_ingest['rows']))\n participant_ingested_metrics = list(filter(lambda x: x.\n participant_id == pid, metrics))\n self.assertEqual(len(file_metrics), len(\n participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in\n participant_ingested_metrics))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_reconcile_pdr_data(self, mock_cloud_task):\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n first_run = self.job_run_dao.get_all()\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.\n model_type.__tablename__)\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in\n first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n participant = self.data_generator.create_database_participant()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1))\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=first_run[\n 0].id, startTime=clock.CLOCK.now(), filePath=\n f'test_file_path_{i}', bucketName='test_bucket',\n fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id)\n manifest = (self.data_generator.\n create_database_genomic_manifest_file(manifestTypeId=2,\n filePath=f'test_file_path_{i}'))\n 
self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id, feedbackRecordCount=2)\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId, event_name=\n 'test_event', run_id=1)\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1, event_type=\n 'informing_loop_decision', module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later', event_authored_time=clock\n .CLOCK.now())\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co', email_notification_sent=0, sample_id=\n 'sample_test', results_type='hdr',\n genomic_set_member_id=gen_member.id)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i, appointment_id=i, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(), source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id, participant_id=\n participant.participantId, module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now())\n self.data_generator.create_genomic_result_viewed(participant_id\n =participant.participantId, event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(), module_type=\n 'gem', sample_id=gen_member.sampleId)\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n affected_tables = ['genomic_set', 'genomic_set_member',\n 'genomic_job_run', 'genomic_file_processed',\n 'genomic_gc_validation_metrics', 'genomic_manifest_file',\n 'genomic_manifest_feedback', 'genomic_informing_loop',\n 'genomic_cvl_results_past_due', 'user_event_metrics',\n 'genomic_member_report_state', 'genomic_result_viewed',\n 'genomic_appointment_event']\n num_calls = len(affected_tables) + 1\n self.assertEqual(mock_cloud_task.call_count, num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):\n bucket_name = 'test-bucket'\n aw1_file_name = (\n 'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')\n aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'\n aw2_file_name = (\n 'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')\n aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=\n clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)\n aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),\n 
endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.\n SUCCESS)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(3)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n self.data_generator.create_database_genomic_aw1_raw(file_path=\n aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=\n 'A10001')\n self.data_generator.create_database_genomic_aw2_raw(file_path=\n aw2_manifest_path, biobank_id='A10001', sample_id='100001',\n biobankidsampleid='A10001_100001')\n aw1_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW1, filePath=\n aw1_manifest_path, fileName=aw1_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw2_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW2, filePath=\n aw2_manifest_path, fileName=aw2_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw1_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw1_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',\n bucketName=bucket_name, fileName=aw1_file_name))\n aw2_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw2_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',\n bucketName=bucket_name, fileName=aw2_file_name))\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='100153482', sampleId=\n '21042005280', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n aw2_file_processed.id)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(4)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n with self.member_dao.session() as session:\n session.query(GenomicGCValidationMetrics).delete()\n session.query(GenomicSetMember).delete()\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(5)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, 
GenomicSubProcessResult.SUCCESS)\n self.assertEqual(mock_cloud_task.call_count, 2)\n self.assertTrue(mock_cloud_task.call_count)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 2)\n cloud_task_endpoint = ['ingest_aw1_manifest_task',\n 'ingest_aw2_manifest_task']\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])\n self.assertTrue(len(mock_buckets), 1)\n self.assertTrue(list(mock_buckets)[0] == bucket_name)\n\n def test_calculate_informing_loop_ready_flags(self):\n num_participants = 4\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n for num in range(num_participants):\n plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)\n plus_num = plus_num.replace(microsecond=0)\n with FakeClock(plus_num):\n summary = (self.data_generator.\n create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1))\n stored_sample = (self.data_generator.\n create_database_biobank_stored_sample(biobankId=summary\n .biobankId, biobankOrderIdentifier=self.fake.pyint()))\n collection_site = self.data_generator.create_database_site(\n siteType='Clinic')\n order = self.data_generator.create_database_biobank_order(\n collectedSiteId=collection_site.siteId, participantId=\n summary.participantId, finalizedTime=plus_num)\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='1')\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='2')\n member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, participantId=summary.participantId, genomeType=\n config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,\n gcManifestSampleSource='Whole Blood', collectionTubeId=\n stored_sample.biobankStoredSampleId))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=member.id, sexConcordance='True',\n drcFpConcordance='Pass', drcSexConcordance='Pass',\n processingStatus='Pass')\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is None for\n obj in current_set_members))\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n calculation_limit = 2\n config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [\n calculation_limit])\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n current_loops_set = [obj for 
obj in current_set_members if obj.\n informingLoopReadyFlag == 1 and obj.\n informingLoopReadyFlagModified is not None]\n self.assertEqual(len(current_loops_set), calculation_limit)\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants // 2)\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), 0)\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_getting_results_withdrawn(self, email_mock):\n num_participants = 4\n result_withdrawal_dao = GenomicResultWithdrawalsDao()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids = []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=\n gen_job_run.id if num % 2 == 0 else None)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=\n gen_job_run.id)\n pids.append(summary.participantId)\n config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,\n '[email protected]')\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n call_args = email_mock.call_args_list\n self.assertTrue(any('GEM' in call.args[0].subject for call in\n call_args))\n self.assertTrue(any('HEALTH' in call.args[0].subject for call in\n call_args))\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n all_withdrawal_records = result_withdrawal_dao.get_all()\n self.assertTrue(len(all_withdrawal_records) == len(pids))\n self.assertTrue(all(obj.participant_id in pids for obj in\n all_withdrawal_records))\n array_results = list(filter(lambda x: x.array_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(array_results), 2)\n cvl_results = list(filter(lambda x: x.cvl_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(cvl_results), num_participants)\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, 
job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n\n def test_gem_results_to_report_state(self):\n num_participants = 8\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gem_a2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids_to_update, member_ids = [], []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, participantId=summary.\n participantId, genomeType=config.GENOME_TYPE_ARRAY)\n if num % 2 == 0:\n member_ids.append(member.id)\n pids_to_update.append(summary.participantId)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 2)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n current_members = self.member_dao.get_all()\n for member in current_members:\n if member.participantId in pids_to_update:\n member.gemA2ManifestJobRunId = gem_a2_job_run.id\n member.genomicWorkflowState = (GenomicWorkflowState.\n GEM_RPT_READY)\n self.member_dao.update(member)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 3)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n current_gem_report_states = self.report_state_dao.get_all()\n self.assertEqual(len(current_gem_report_states), len(pids_to_update))\n self.assertTrue(all(obj.event_type == 'result_ready' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.module == 'gem' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state == GenomicReportState.\n GEM_RPT_READY for obj in current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state_str ==\n GenomicReportState.GEM_RPT_READY.name for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in\n current_gem_report_states))\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 4)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[2]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n self.clear_table_after_test('genomic_member_report_state')\n <mask token>\n\n def test_reconcile_message_broker_results_ready(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n 
self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.result_ready', run_id=1)\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.informative', run_id=1)\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.uninformative',\n run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n self.assertEqual(5, len(states))\n pgx_records = [rec for rec in states if rec.module == 'pgx_v1']\n hdr_record_uninf = [rec for rec in states if rec.\n genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0\n ]\n hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==\n GenomicReportState.HDR_RPT_POSITIVE][0]\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.\n genomic_report_state)\n self.assertEqual('PGX_RPT_READY', pgx_record.\n genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.\n participant_id + 10)\n self.assertEqual('result_ready', pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.\n event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0),\n hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.\n event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n <mask token>\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n appointment_data = test_data.load_test_data_json(\n 'Genomic-Metrics-File-Appointment-Events-Test.json')\n appointment_data_str = json.dumps(appointment_data, indent=4)\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode('utf-8'))\n with 
GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_appointment_metrics_file(file_path=test_file_path\n )\n all_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))\n self.assertTrue(all(obj.file_path == test_file_path for obj in\n all_metrics))\n self.assertTrue(all(obj.appointment_event is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.created is not None for obj in all_metrics))\n self.assertTrue(all(obj.modified is not None for obj in all_metrics))\n self.assertTrue(all(obj.module_type is not None for obj in all_metrics)\n )\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.event_type is not None for obj in all_metrics))\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.\n APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n 
scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n '[email protected]'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n '[email protected]'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n 
self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n <mask token>\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = 
self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
"step-4": "<mask token>\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = '1' * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={'text': incident_message})\n\n def test_gvcf_files_ingestion(self):\n job_controller = GenomicJobController(job_id=38)\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n file_path_md5 = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum'\n )\n full_path = f'{bucket_name}/{file_path}'\n full_path_md5 = f'{bucket_name}/{file_path_md5}'\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='100153482', sampleId=\n '21042005280', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName='test_bucket', fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n job_controller.ingest_data_files_into_gc_metrics(file_path_md5,\n bucket_name)\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n self.assertIsNotNone(metrics.gvcfMd5Path)\n self.assertEqual(metrics.gvcfMd5Path, full_path_md5)\n job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n self.assertIsNotNone(metrics.gvcfPath)\n self.assertEqual(metrics.gvcfPath, full_path)\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = 'test_bucket'\n file_path = (\n 'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'\n )\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_member = 
self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='111111111', sampleId=\n '222222222222', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=gen_job_run.id,\n startTime=clock.CLOCK.now(), filePath='/test_file_path',\n bucketName=bucket_name, fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n gen_processed_file.id)\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name\n )\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.\n UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message,\n 'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'\n )\n\n def test_accession_data_files(self):\n test_bucket_baylor = 'fake-data-bucket-baylor'\n test_idat_file = (\n 'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat'\n )\n test_vcf_file = (\n 'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz'\n )\n test_cram_file = (\n 'fake-data-bucket-baylor/Wgs_sample_raw_data/CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram'\n )\n test_files = [test_idat_file, test_vcf_file, test_cram_file]\n test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)\n with clock.FakeClock(test_time):\n for file_path in test_files:\n with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES\n ) as controller:\n controller.accession_data_files(file_path,\n test_bucket_baylor)\n inserted_files = self.data_file_dao.get_all()\n expected_idat = GenomicGcDataFile(id=1, created=test_time, modified\n =test_time, file_path=test_idat_file, gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor', file_prefix=\n 'Genotyping_sample_raw_data', file_name=\n '204027270091_R02C01_Grn.idat', file_type='Grn.idat',\n identifier_type='chipwellbarcode', identifier_value=\n '204027270091_R02C01', ignore_flag=0)\n expected_vcf = GenomicGcDataFile(id=2, created=test_time, modified=\n test_time, file_path=test_vcf_file, gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor', file_prefix=\n 'Genotyping_sample_raw_data', file_name=\n '204027270091_R02C01.vcf.gz', file_type='vcf.gz',\n identifier_type='chipwellbarcode', identifier_value=\n '204027270091_R02C01', ignore_flag=0)\n expected_cram = GenomicGcDataFile(id=3, created=test_time, modified\n =test_time, file_path=test_cram_file, gc_site_id='bcm',\n bucket_name='fake-data-bucket-baylor', file_prefix=\n 'Wgs_sample_raw_data/CRAMs_CRAIs', file_name=\n 'BCM_A100134256_21063006771_SIA0017196_1.cram', file_type=\n 'cram', identifier_type='sample_id', identifier_value=\n '21063006771', ignore_flag=0)\n expected_objs = {(0): expected_idat, (1): expected_vcf, (2):\n expected_cram}\n for i in range(3):\n self.assertEqual(expected_objs[i].bucket_name, inserted_files[i\n ].bucket_name)\n self.assertEqual(expected_objs[i].created, inserted_files[i].\n created)\n self.assertEqual(expected_objs[i].file_name, inserted_files[i].\n file_name)\n 
self.assertEqual(expected_objs[i].file_path, inserted_files[i].\n file_path)\n self.assertEqual(expected_objs[i].file_prefix, inserted_files[i\n ].file_prefix)\n self.assertEqual(expected_objs[i].file_type, inserted_files[i].\n file_type)\n self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i]\n .gc_site_id)\n self.assertEqual(expected_objs[i].id, inserted_files[i].id)\n self.assertEqual(expected_objs[i].identifier_type,\n inserted_files[i].identifier_type)\n self.assertEqual(expected_objs[i].identifier_value,\n inserted_files[i].identifier_value)\n self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i\n ].ignore_flag)\n self.assertEqual(expected_objs[i].metadata, inserted_files[i].\n metadata)\n self.assertEqual(expected_objs[i].modified, inserted_files[i].\n modified)\n\n def test_updating_members_blocklists(self):\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n ids_should_be_updated = []\n for i in range(4):\n ids_should_be_updated.append(self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set.id,\n biobankId='100153482', sampleId='21042005280', genomeType=\n 'test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if\n i & 2 == 0 else 'N').id)\n for i in range(2):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_array', genomicWorkflowState=\n GenomicWorkflowState.AW0, ai_an='N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n created_members = self.member_dao.get_all()\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.\n blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in\n blocklisted].sort())\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in created_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in created_members if obj.genomeType ==\n 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResearch == 0 and obj.\n blockResearchReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in created_members if obj.\n genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0))\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n run_result = self.job_run_dao.get(1)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n 
COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, biobankId='100153482', sampleId='21042005280',\n genomeType='test_investigation_one' if i & 2 != 0 else\n 'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N')\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS\n ) as controller:\n controller.update_members_blocklists()\n modified_members = self.member_dao.get_all()\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 0 and obj.\n blockResultsReason is None for obj in modified_members if obj.\n ai_an == 'Y' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResearch == 1 and obj.\n blockResearchReason is not None and obj.blockResearchReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n self.assertTrue(all(obj.blockResults == 1 and obj.\n blockResultsReason is not None and obj.blockResultsReason ==\n 'test_sample_swap' for obj in modified_members if obj.\n genomeType == 'test_investigation_one' and obj.\n genomicWorkflowState == GenomicWorkflowState.AW1))\n run_result = self.job_run_dao.get(2)\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.\n COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n file_ingester = GenomicFileIngester()\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n test_metrics_file = create_ingestion_test_file(test_file,\n bucket_name, sub_folder)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_metrics_file(metric_type='user_events',\n file_path=test_file_path)\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].\n split('P')[-1]) == pid, metrics_to_ingest['rows']))\n participant_ingested_metrics = list(filter(lambda x: x.\n participant_id == pid, metrics))\n self.assertEqual(len(file_metrics), len(\n participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in\n participant_ingested_metrics))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_reconcile_pdr_data(self, mock_cloud_task):\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n first_run = self.job_run_dao.get_all()\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], 
self.job_run_dao.\n model_type.__tablename__)\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in\n first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n participant = self.data_generator.create_database_participant()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, biobankId='100153482', sampleId='21042005280',\n genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1))\n gen_processed_file = (self.data_generator.\n create_database_genomic_file_processed(runId=first_run[\n 0].id, startTime=clock.CLOCK.now(), filePath=\n f'test_file_path_{i}', bucketName='test_bucket',\n fileName='test_file_name'))\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id)\n manifest = (self.data_generator.\n create_database_genomic_manifest_file(manifestTypeId=2,\n filePath=f'test_file_path_{i}'))\n self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id, feedbackRecordCount=2)\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId, event_name=\n 'test_event', run_id=1)\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1, event_type=\n 'informing_loop_decision', module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later', event_authored_time=clock\n .CLOCK.now())\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co', email_notification_sent=0, sample_id=\n 'sample_test', results_type='hdr',\n genomic_set_member_id=gen_member.id)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i, appointment_id=i, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(), source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id, participant_id=\n participant.participantId, module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now())\n self.data_generator.create_genomic_result_viewed(participant_id\n =participant.participantId, event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(), module_type=\n 'gem', sample_id=gen_member.sampleId)\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n affected_tables = ['genomic_set', 'genomic_set_member',\n 'genomic_job_run', 'genomic_file_processed',\n 'genomic_gc_validation_metrics', 'genomic_manifest_file',\n 'genomic_manifest_feedback', 'genomic_informing_loop',\n 'genomic_cvl_results_past_due', 'user_event_metrics',\n 'genomic_member_report_state', 'genomic_result_viewed',\n 'genomic_appointment_event']\n num_calls = len(affected_tables) + 1\n self.assertEqual(mock_cloud_task.call_count, 
num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):\n bucket_name = 'test-bucket'\n aw1_file_name = (\n 'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')\n aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'\n aw2_file_name = (\n 'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')\n aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=\n clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)\n aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),\n endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.\n SUCCESS)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(3)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n self.data_generator.create_database_genomic_aw1_raw(file_path=\n aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=\n 'A10001')\n self.data_generator.create_database_genomic_aw2_raw(file_path=\n aw2_manifest_path, biobank_id='A10001', sample_id='100001',\n biobankidsampleid='A10001_100001')\n aw1_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW1, filePath=\n aw1_manifest_path, fileName=aw1_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw2_manifest_file = (self.data_generator.\n create_database_genomic_manifest_file(created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW2, filePath=\n aw2_manifest_path, fileName=aw2_file_name, bucketName=\n bucket_name, recordCount=1, rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now()))\n aw1_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw1_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',\n bucketName=bucket_name, fileName=aw1_file_name))\n aw2_file_processed = (self.data_generator.\n create_database_genomic_file_processed(runId=aw2_job_run.id,\n startTime=clock.CLOCK.now(), genomicManifestFileId=\n aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',\n bucketName=bucket_name, fileName=aw2_file_name))\n gen_member = 
self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, biobankId='100153482', sampleId=\n '21042005280', genomeType='aou_wgs', genomicWorkflowState=\n GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id, genomicFileProcessedId=\n aw2_file_processed.id)\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(4)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n with self.member_dao.session() as session:\n session.query(GenomicGCValidationMetrics).delete()\n session.query(GenomicSetMember).delete()\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS\n ) as controller:\n controller.retry_manifest_ingestions()\n job_run = self.job_run_dao.get(5)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)\n self.assertEqual(mock_cloud_task.call_count, 2)\n self.assertTrue(mock_cloud_task.call_count)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 2)\n cloud_task_endpoint = ['ingest_aw1_manifest_task',\n 'ingest_aw2_manifest_task']\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue(all(obj for obj in mock_endpoint if obj ==\n cloud_task_endpoint))\n mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])\n self.assertTrue(len(mock_buckets), 1)\n self.assertTrue(list(mock_buckets)[0] == bucket_name)\n\n def test_calculate_informing_loop_ready_flags(self):\n num_participants = 4\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n for num in range(num_participants):\n plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)\n plus_num = plus_num.replace(microsecond=0)\n with FakeClock(plus_num):\n summary = (self.data_generator.\n create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1))\n stored_sample = (self.data_generator.\n create_database_biobank_stored_sample(biobankId=summary\n .biobankId, biobankOrderIdentifier=self.fake.pyint()))\n collection_site = self.data_generator.create_database_site(\n siteType='Clinic')\n order = self.data_generator.create_database_biobank_order(\n collectedSiteId=collection_site.siteId, participantId=\n summary.participantId, finalizedTime=plus_num)\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='1')\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId, system='2')\n member = (self.data_generator.\n create_database_genomic_set_member(genomicSetId=gen_set\n .id, participantId=summary.participantId, genomeType=\n config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,\n gcManifestSampleSource='Whole Blood', collectionTubeId=\n stored_sample.biobankStoredSampleId))\n 
self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=member.id, sexConcordance='True',\n drcFpConcordance='Pass', drcSexConcordance='Pass',\n processingStatus='Pass')\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is None for\n obj in current_set_members))\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants)\n calculation_limit = 2\n config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [\n calculation_limit])\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n current_loops_set = [obj for obj in current_set_members if obj.\n informingLoopReadyFlag == 1 and obj.\n informingLoopReadyFlagModified is not None]\n self.assertEqual(len(current_loops_set), calculation_limit)\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), num_participants // 2)\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY\n ) as controller:\n controller.calculate_informing_loop_ready_flags()\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in\n current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for\n obj in current_set_members))\n members_for_ready_loop = (self.member_dao.\n get_members_for_informing_loop_ready())\n self.assertEqual(len(members_for_ready_loop), 0)\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_getting_results_withdrawn(self, email_mock):\n num_participants = 4\n result_withdrawal_dao = GenomicResultWithdrawalsDao()\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gen_job_run = self.data_generator.create_database_genomic_job_run(jobId\n =GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids = []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=\n gen_job_run.id if num % 2 == 0 else None)\n self.data_generator.create_database_genomic_set_member(genomicSetId\n =gen_set.id, participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=\n gen_job_run.id)\n pids.append(summary.participantId)\n config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,\n '[email 
protected]')\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n call_args = email_mock.call_args_list\n self.assertTrue(any('GEM' in call.args[0].subject for call in\n call_args))\n self.assertTrue(any('HEALTH' in call.args[0].subject for call in\n call_args))\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n all_withdrawal_records = result_withdrawal_dao.get_all()\n self.assertTrue(len(all_withdrawal_records) == len(pids))\n self.assertTrue(all(obj.participant_id in pids for obj in\n all_withdrawal_records))\n array_results = list(filter(lambda x: x.array_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(array_results), 2)\n cvl_results = list(filter(lambda x: x.cvl_results == 1,\n all_withdrawal_records))\n self.assertTrue(len(cvl_results), num_participants)\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS\n ) as controller:\n controller.check_results_withdrawals()\n self.assertEqual(email_mock.call_count, 2)\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n\n def test_gem_results_to_report_state(self):\n num_participants = 8\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)\n gem_a2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS)\n pids_to_update, member_ids = [], []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT)\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id, participantId=summary.\n participantId, genomeType=config.GENOME_TYPE_ARRAY)\n if num % 2 == 0:\n member_ids.append(member.id)\n pids_to_update.append(summary.participantId)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 2)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[0]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n current_members = self.member_dao.get_all()\n for member in current_members:\n if member.participantId in pids_to_update:\n member.gemA2ManifestJobRunId = gem_a2_job_run.id\n member.genomicWorkflowState = (GenomicWorkflowState.\n GEM_RPT_READY)\n self.member_dao.update(member)\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 3)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[1]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n current_gem_report_states = self.report_state_dao.get_all()\n 
self.assertEqual(len(current_gem_report_states), len(pids_to_update))\n self.assertTrue(all(obj.event_type == 'result_ready' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.module == 'gem' for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state == GenomicReportState.\n GEM_RPT_READY for obj in current_gem_report_states))\n self.assertTrue(all(obj.genomic_report_state_str ==\n GenomicReportState.GEM_RPT_READY.name for obj in\n current_gem_report_states))\n self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in\n current_gem_report_states))\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 4)\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.\n GEM_RESULT_REPORTS, current_job_runs))[2]\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.NO_RESULTS)\n self.clear_table_after_test('genomic_member_report_state')\n\n def test_reconcile_informing_loop(self):\n event_dao = UserEventMetricsDao()\n event_dao.truncate()\n il_dao = GenomicInformingLoopDao()\n for pid in range(8):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n for b in ['aou_array', 'aou_wgs']:\n for i in range(1, 9):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType=b)\n events = ['gem.informing_loop.started',\n 'gem.informing_loop.screen8_no',\n 'gem.informing_loop.screen8_yes', 'hdr.informing_loop.started',\n 'gem.informing_loop.screen3', 'pgx.informing_loop.screen8_no',\n 'hdr.informing_loop.screen10_no']\n for p in range(4):\n for i in range(len(events)):\n self.data_generator.create_database_genomic_user_event_metrics(\n created=clock.CLOCK.now(), modified=clock.CLOCK.now(),\n participant_id=p + 1, created_at=datetime.datetime(2021,\n 12, 29, 0) + datetime.timedelta(hours=i), event_name=\n events[i], run_id=1, ignore_flag=0)\n decisions = [None, 'no', 'yes']\n for p in range(3):\n for i in range(2):\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=i, event_type=\n 'informing_loop_started' if i == 0 else\n 'informing_loop_decision', module_type='gem',\n participant_id=p + 1, decision_value=decisions[i],\n sample_id=100 + p, event_authored_time=datetime.\n datetime(2021, 12, 29, 0) + datetime.timedelta(hours=i))\n self.data_generator.create_database_genomic_user_event_metrics(created\n =clock.CLOCK.now(), modified=clock.CLOCK.now(), participant_id=\n 6, created_at=datetime.datetime(2021, 12, 29, 0), event_name=\n 'gem.informing_loop.screen8_yes', run_id=1, ignore_flag=0)\n genomic_pipeline.reconcile_informing_loop_responses()\n pid_list = [1, 2, 3, 6]\n new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,\n module='gem')\n for value in new_il_values:\n self.assertEqual('yes', value.decision_value)\n pid_list = [1, 2, 3, 4]\n for module in ['hdr', 'pgx']:\n new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,\n module=module)\n 
for value in new_il_values:\n self.assertEqual('no', value.decision_value)\n self.assertIsNotNone(value.created_from_metric_id)\n\n def test_reconcile_message_broker_results_ready(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.result_ready', run_id=1)\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.informative', run_id=1)\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.result_ready.uninformative',\n run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n self.assertEqual(5, len(states))\n pgx_records = [rec for rec in states if rec.module == 'pgx_v1']\n hdr_record_uninf = [rec for rec in states if rec.\n genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0\n ]\n hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==\n GenomicReportState.HDR_RPT_POSITIVE][0]\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.\n genomic_report_state)\n self.assertEqual('PGX_RPT_READY', pgx_record.\n genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.\n participant_id + 10)\n self.assertEqual('result_ready', pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.\n event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0),\n hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.\n genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.\n participant_id + 10)\n self.assertEqual('result_ready', hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.\n event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n\n def test_reconcile_message_broker_results_viewed(self):\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n self.data_generator.create_database_genomic_job_run(jobId=\n GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())\n for pid in range(3):\n self.data_generator.create_database_participant(participantId=1 +\n pid, biobankId=1 + pid)\n for i in range(1, 
3):\n self.data_generator.create_database_genomic_set_member(\n participantId=i, genomicSetId=1, biobankId=i,\n collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'\n )\n if i == 1:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='pgx.opened_at', run_id=1)\n if i == 2:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i, created_at=datetime.datetime(2022, 10,\n 6, 0), event_name='hdr.opened_at', run_id=1)\n genomic_cvl_pipeline.reconcile_message_broker_results_viewed()\n result_viewed_dao = GenomicResultViewedDao()\n results = result_viewed_dao.get_all()\n self.assertEqual(2, len(results))\n for record in results:\n if record.participant_id == 1:\n self.assertEqual('pgx_v1', record.module_type)\n else:\n self.assertEqual('hdr_v1', record.module_type)\n self.assertEqual(int(record.sample_id), record.participant_id + 10)\n self.assertEqual('result_viewed', record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 0), record.\n first_viewed)\n self.assertIsNotNone(record.created_from_metric_id)\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n appointment_data = test_data.load_test_data_json(\n 'Genomic-Metrics-File-Appointment-Events-Test.json')\n appointment_data_str = json.dumps(appointment_data, indent=4)\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode('utf-8'))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST\n ) as controller:\n controller.ingest_appointment_metrics_file(file_path=test_file_path\n )\n all_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))\n self.assertTrue(all(obj.file_path == test_file_path for obj in\n all_metrics))\n self.assertTrue(all(obj.appointment_event is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.created is not None for obj in all_metrics))\n self.assertTrue(all(obj.modified is not None for obj in all_metrics))\n self.assertTrue(all(obj.module_type is not None for obj in all_metrics)\n )\n self.assertTrue(all(obj.event_authored_time is not None for obj in\n all_metrics))\n self.assertTrue(all(obj.event_type is not None for obj in all_metrics))\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.\n APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult ==\n GenomicSubProcessResult.SUCCESS)\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n missing_json = {'event': 'appointment_updated',\n 'eventAuthoredTime': '2022-09-16T17:18:38Z',\n 'participantId': f'P{summary.participantId}', 'messageBody':\n {'module_type': 'hdr', 'appointment_timestamp':\n '2022-09-19T19:30:00+00:00', 'id': 55,\n 
'appointment_timezone': 'America/Los_Angeles', 'location':\n 'CA', 'contact_number': '18043704252', 'language': 'en',\n 'source': 'Color'}}\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()\n ), appointment_timezone='America/Los_Angeles', location\n ='123 address st', contact_number='17348675309',\n language='en')\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId, appointment_event=\n json.dumps(missing_json, indent=4) if num % 2 != 0 else\n 'foo', file_path='test_file_path', module_type='hdr',\n event_authored_time=fake_date, event_type=\n 'appointment_updated' if num % 2 != 0 else\n 'appointment_scheduled')\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 2)\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in\n current_metrics))\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE\n ) as controller:\n controller.reconcile_appointment_events_from_metrics()\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.\n APPOINTMENT_METRICS_RECONCILE)\n current_events = self.appointment_event_dao.get_all()\n self.assertEqual(len(current_events), 4)\n scheduled = list(filter(lambda x: x.event_type ==\n 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in\n scheduled))\n updated = list(filter(lambda x: x.event_type ==\n 'appointment_updated', current_events))\n self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in\n updated))\n current_metrics = self.appointment_metrics_dao.get_all()\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in\n current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for\n obj in current_metrics))\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [\n '[email protected]'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=gror)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num, appointment_id=num, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date,\n source='Color', appointment_timestamp=format_datetime(clock\n .CLOCK.now()), appointment_timezone='America/Los_Angeles',\n location='123 address st', contact_number='17348675309',\n language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(2, len(changed_ppts))\n with 
GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED\n ) as controller:\n controller.check_appointments_gror_changed()\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=2)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5, appointment_id=5, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=\n summary.participantId, event_authored_time=fake_date, source=\n 'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(\n )), appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n changed_ppts = (self.appointment_event_dao.\n get_appointments_gror_changed())\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n '[email protected]'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, 
source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=14))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 14 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n <mask token>\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_ce_escalation(self, email_mock):\n fake_date = parser.parse('2022-09-01T13:43:23')\n fake_date2 = parser.parse('2022-09-02T14:14:00')\n fake_date3 = parser.parse('2022-09-03T15:15:00')\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [\n '[email protected]'])\n self.data_generator.create_database_genomic_set(genomicSetName=\n 'test', genomicSetCriteria='.', genomicSetVersion=1)\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1, consentForGenomicsROR=1)\n set_member = (self.data_generator.\n create_database_genomic_set_member(participantId=summary.\n participantId, genomicSetId=1, biobankId=1001,\n collectionTubeId=100, sampleId=10, genomeType='aou_wgs',\n participantOrigin='careevolution'))\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId, genomic_report_state=\n GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=\n set_member.id, module='hdr_v1', event_authored_time=fake_date)\n pids.append(summary.participantId)\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101, appointment_id=102, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [0], event_authored_time=fake_date, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102, appointment_id=103, event_type=\n 'appointment_completed', module_type='hdr', participant_id=pids\n [1], event_authored_time=fake_date, source='Color',\n appointment_timestamp=fake_date, appointment_timezone=\n 'America/Los_Angeles', location='123 address st',\n contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103, appointment_id=104, event_type=\n 'appointment_scheduled', module_type='hdr', participant_id=pids\n [2], 
event_authored_time=fake_date2, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104, appointment_id=104, event_type=\n 'appointment_cancelled', module_type='hdr', participant_id=pids\n [2], event_authored_time=fake_date3, source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles', location=\n '123 address st', contact_number='17348675309', language='en')\n notified_dao = GenomicDefaultBaseDao(model_type=\n GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{'participant_id': pids[4], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': True}, {'participant_id': pids[5], 'created':\n clock.CLOCK.now(), 'modified': clock.CLOCK.now(),\n 'message_sent': False}])\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = (self.report_state_dao.\n get_hdr_result_positive_no_appointment(num_days=30,\n participant_origin='careevolution'))\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION\n ) as controller:\n controller.check_gcr_escalation(controller.job_id)\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject,\n 'GCR Outreach 30 Day Escalation')\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch(\n 'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'\n )\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR\n )\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(\n job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.\n SUCCESS)\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(\n 'manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==\n 'genomic-generate-manifest')\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.\n SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in\n all_job_runs))\n",
"step-5": "import datetime\nimport json\n\nfrom dateutil import parser\nimport mock\nfrom python_http_client.exceptions import ForbiddenError\n\nfrom rdr_service import clock, config\nfrom rdr_service.api_util import open_cloud_file\nfrom rdr_service.clock import FakeClock\nfrom rdr_service.dao.database_utils import format_datetime\nfrom rdr_service.dao.genomics_dao import GenomicGcDataFileDao, GenomicGCValidationMetricsDao, GenomicIncidentDao, \\\n GenomicSetMemberDao, UserEventMetricsDao, GenomicJobRunDao, GenomicResultWithdrawalsDao, \\\n GenomicMemberReportStateDao, GenomicAppointmentEventMetricsDao, GenomicAppointmentEventDao, GenomicResultViewedDao, \\\n GenomicInformingLoopDao, GenomicAppointmentEventNotifiedDao, GenomicDefaultBaseDao\nfrom rdr_service.dao.message_broker_dao import MessageBrokenEventDataDao\nfrom rdr_service.genomic_enums import GenomicIncidentCode, GenomicJob, GenomicWorkflowState, GenomicSubProcessResult, \\\n GenomicSubProcessStatus, GenomicManifestTypes, GenomicQcStatus, GenomicReportState\nfrom rdr_service.genomic.genomic_job_components import GenomicFileIngester\nfrom rdr_service.genomic.genomic_job_controller import GenomicJobController\nfrom rdr_service.model.genomics import GenomicGcDataFile, GenomicIncident, GenomicSetMember, GenomicGCValidationMetrics,\\\n GenomicGCROutreachEscalationNotified\nfrom rdr_service.offline.genomics import genomic_pipeline, genomic_cvl_pipeline\nfrom rdr_service.participant_enums import WithdrawalStatus\nfrom tests import test_data\nfrom tests.genomics_tests.test_genomic_utils import create_ingestion_test_file\nfrom tests.helpers.unittest_base import BaseTestCase\n\n\nclass GenomicJobControllerTest(BaseTestCase):\n def setUp(self):\n super(GenomicJobControllerTest, self).setUp()\n self.data_file_dao = GenomicGcDataFileDao()\n self.event_data_dao = MessageBrokenEventDataDao()\n self.incident_dao = GenomicIncidentDao()\n self.member_dao = GenomicSetMemberDao()\n self.metrics_dao = GenomicGCValidationMetricsDao()\n self.user_event_metrics_dao = UserEventMetricsDao()\n self.job_run_dao = GenomicJobRunDao()\n self.report_state_dao = GenomicMemberReportStateDao()\n self.appointment_event_dao = GenomicAppointmentEventDao()\n self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()\n\n def test_incident_with_long_message(self):\n \"\"\"Make sure the length of incident messages doesn't cause issues when recording them\"\"\"\n incident_message = \"1\" * (GenomicIncident.message.type.length + 20)\n mock_slack_handler = mock.MagicMock()\n\n job_controller = GenomicJobController(job_id=1)\n job_controller.genomic_alert_slack = mock_slack_handler\n job_controller.create_incident(message=incident_message, slack=True)\n\n # Double check that the incident was saved successfully, with part of the message\n incident: GenomicIncident = self.session.query(GenomicIncident).one()\n self.assertTrue(incident_message.startswith(incident.message))\n\n # Make sure Slack received the full message\n mock_slack_handler.send_message_to_webhook.assert_called_with(\n message_data={\n 'text': incident_message\n }\n )\n\n def test_gvcf_files_ingestion(self):\n job_controller = GenomicJobController(job_id=38)\n bucket_name = \"test_bucket\"\n\n file_path = \"Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz\"\n file_path_md5 = \"Wgs_sample_raw_data/SS_VCF_research/\" \\\n \"BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum\"\n\n full_path = f'{bucket_name}/{file_path}'\n full_path_md5 = 
f'{bucket_name}/{file_path_md5}'\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1\n )\n\n gen_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n gen_processed_file = self.data_generator.create_database_genomic_file_processed(\n runId=gen_job_run.id,\n startTime=clock.CLOCK.now(),\n filePath='/test_file_path',\n bucketName='test_bucket',\n fileName='test_file_name',\n )\n\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id\n )\n\n job_controller.ingest_data_files_into_gc_metrics(file_path_md5, bucket_name)\n\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n\n self.assertIsNotNone(metrics.gvcfMd5Path)\n self.assertEqual(metrics.gvcfMd5Path, full_path_md5)\n\n job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)\n\n metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)\n\n self.assertIsNotNone(metrics.gvcfPath)\n self.assertEqual(metrics.gvcfPath, full_path)\n\n def test_gvcf_files_ingestion_create_incident(self):\n bucket_name = \"test_bucket\"\n file_path = \"Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz\"\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"111111111\",\n sampleId=\"222222222222\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1\n )\n\n gen_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n gen_processed_file = self.data_generator.create_database_genomic_file_processed(\n runId=gen_job_run.id,\n startTime=clock.CLOCK.now(),\n filePath='/test_file_path',\n bucketName=bucket_name,\n fileName='test_file_name',\n )\n\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id\n )\n\n with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:\n controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)\n\n incident = self.incident_dao.get(1)\n self.assertIsNotNone(incident)\n self.assertEqual(incident.code, GenomicIncidentCode.UNABLE_TO_FIND_METRIC.name)\n self.assertEqual(incident.data_file_path, file_path)\n self.assertEqual(incident.message, 'INGEST_DATA_FILES: Cannot find '\n 'genomics metric record for sample id: '\n '21042005280')\n\n def test_accession_data_files(self):\n test_bucket_baylor = \"fake-data-bucket-baylor\"\n test_idat_file = \"fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat\"\n test_vcf_file = \"fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz\"\n\n test_cram_file = \"fake-data-bucket-baylor/Wgs_sample_raw_data/\" \\\n \"CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram\"\n\n test_files 
= [test_idat_file, test_vcf_file, test_cram_file]\n\n test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)\n\n # run job controller method on each file\n with clock.FakeClock(test_time):\n\n for file_path in test_files:\n with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES) as controller:\n controller.accession_data_files(file_path, test_bucket_baylor)\n\n inserted_files = self.data_file_dao.get_all()\n\n # idat\n expected_idat = GenomicGcDataFile(\n id=1,\n created=test_time,\n modified=test_time,\n file_path=test_idat_file,\n gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor',\n file_prefix='Genotyping_sample_raw_data',\n file_name='204027270091_R02C01_Grn.idat',\n file_type='Grn.idat',\n identifier_type='chipwellbarcode',\n identifier_value='204027270091_R02C01',\n ignore_flag=0,\n )\n\n # vcf\n expected_vcf = GenomicGcDataFile(\n id=2,\n created=test_time,\n modified=test_time,\n file_path=test_vcf_file,\n gc_site_id='jh',\n bucket_name='fake-data-bucket-baylor',\n file_prefix='Genotyping_sample_raw_data',\n file_name='204027270091_R02C01.vcf.gz',\n file_type='vcf.gz',\n identifier_type='chipwellbarcode',\n identifier_value='204027270091_R02C01',\n ignore_flag=0,\n )\n\n # cram\n expected_cram = GenomicGcDataFile(\n id=3,\n created=test_time,\n modified=test_time,\n file_path=test_cram_file,\n gc_site_id='bcm',\n bucket_name='fake-data-bucket-baylor',\n file_prefix='Wgs_sample_raw_data/CRAMs_CRAIs',\n file_name='BCM_A100134256_21063006771_SIA0017196_1.cram',\n file_type='cram',\n identifier_type='sample_id',\n identifier_value='21063006771',\n ignore_flag=0,\n )\n\n # obj mapping\n expected_objs = {\n 0: expected_idat,\n 1: expected_vcf,\n 2: expected_cram\n }\n\n # verify test objects match expectations\n for i in range(3):\n self.assertEqual(expected_objs[i].bucket_name, inserted_files[i].bucket_name)\n self.assertEqual(expected_objs[i].created, inserted_files[i].created)\n self.assertEqual(expected_objs[i].file_name, inserted_files[i].file_name)\n self.assertEqual(expected_objs[i].file_path, inserted_files[i].file_path)\n self.assertEqual(expected_objs[i].file_prefix, inserted_files[i].file_prefix)\n self.assertEqual(expected_objs[i].file_type, inserted_files[i].file_type)\n self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i].gc_site_id)\n self.assertEqual(expected_objs[i].id, inserted_files[i].id)\n self.assertEqual(expected_objs[i].identifier_type, inserted_files[i].identifier_type)\n self.assertEqual(expected_objs[i].identifier_value, inserted_files[i].identifier_value)\n self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i].ignore_flag)\n self.assertEqual(expected_objs[i].metadata, inserted_files[i].metadata)\n self.assertEqual(expected_objs[i].modified, inserted_files[i].modified)\n\n def test_updating_members_blocklists(self):\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n ids_should_be_updated = []\n # for just created and wf state query and MATCHES criteria\n for i in range(4):\n ids_should_be_updated.append(\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW0,\n ai_an='Y' if i & 2 == 0 else 'N'\n ).id\n )\n\n # for just created and wf state query and DOES NOT MATCH criteria\n for i in range(2):\n 
self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType='aou_array',\n genomicWorkflowState=GenomicWorkflowState.AW0,\n ai_an='N'\n )\n\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:\n controller.update_members_blocklists()\n\n # current config json in base_config.json\n created_members = self.member_dao.get_all()\n\n blocklisted = list(filter(lambda x: x.blockResults == 1 or x.blockResearch == 1, created_members))\n self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in blocklisted].sort())\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'\n for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)\n )\n\n # should NOT be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 0 and obj.blockResultsReason is None\n for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)\n )\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'\n for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n # should be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'\n for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n # should NOT be RESEARCH/RESULTS blocked\n self.assertTrue(all(\n obj.blockResearch == 0 and obj.blockResearchReason is None\n for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n self.assertTrue(all(\n obj.blockResults == 0 and obj.blockResultsReason is None\n for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW0)\n )\n\n # clear current set member records\n with self.member_dao.session() as session:\n session.query(GenomicSetMember).delete()\n\n run_result = self.job_run_dao.get(1)\n\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n # for modified data query and MATCHES criteria\n for i in range(4):\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',\n genomicWorkflowState=GenomicWorkflowState.AW1,\n ai_an='Y' if i & 2 == 0 else 'N'\n )\n\n with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:\n controller.update_members_blocklists()\n\n modified_members = self.member_dao.get_all()\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'\n for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)\n )\n\n # should NOT be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 0 and obj.blockResultsReason is None\n for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == 
GenomicWorkflowState.AW1)\n )\n\n # should be RESEARCH blocked\n self.assertTrue(all(\n obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'\n for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1)\n )\n\n # should be RESULTS blocked\n self.assertTrue(all(\n obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'\n for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==\n GenomicWorkflowState.AW1)\n )\n\n run_result = self.job_run_dao.get(2)\n\n self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)\n\n def test_ingest_user_metrics_file(self):\n test_file = 'Genomic-Metrics-File-User-Events-Test.csv'\n bucket_name = 'test_bucket'\n sub_folder = 'user_events'\n pids = []\n\n file_ingester = GenomicFileIngester()\n\n for _ in range(2):\n pid = self.data_generator.create_database_participant()\n pids.append(pid.participantId)\n\n test_metrics_file = create_ingestion_test_file(\n test_file,\n bucket_name,\n sub_folder)\n\n test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'\n\n with open_cloud_file(test_file_path) as csv_file:\n metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)\n\n with GenomicJobController(GenomicJob.METRICS_FILE_INGEST) as controller:\n controller.ingest_metrics_file(\n metric_type='user_events',\n file_path=test_file_path,\n )\n\n job_run_id = controller.job_run.id\n metrics = self.user_event_metrics_dao.get_all()\n\n for pid in pids:\n file_metrics = list(filter(lambda x: int(x['participant_id'].split('P')[-1]) == pid, metrics_to_ingest[\n 'rows']))\n participant_ingested_metrics = list(filter(lambda x: x.participant_id == pid, metrics))\n\n self.assertEqual(len(file_metrics), len(participant_ingested_metrics))\n self.assertTrue(all(obj.run_id == job_run_id for obj in participant_ingested_metrics))\n\n @mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')\n def test_reconcile_pdr_data(self, mock_cloud_task):\n\n # init new job run in __enter__\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n\n cloud_task_endpoint = 'rebuild_genomic_table_records_task'\n\n first_run = self.job_run_dao.get_all()\n\n self.assertEqual(mock_cloud_task.call_count, 1)\n call_args = mock_cloud_task.call_args_list\n\n self.assertEqual(len(call_args), 1)\n self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.model_type.__tablename__)\n\n self.assertTrue(type(call_args[0].args[0]['ids']) is list)\n self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in first_run])\n self.assertEqual(call_args[0].args[1], cloud_task_endpoint)\n\n participant = self.data_generator.create_database_participant()\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)\n plus_ten = plus_ten.replace(microsecond=0)\n with FakeClock(plus_ten):\n for i in range(2):\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1\n )\n\n 
gen_processed_file = self.data_generator.create_database_genomic_file_processed(\n runId=first_run[0].id,\n startTime=clock.CLOCK.now(),\n filePath=f'test_file_path_{i}',\n bucketName='test_bucket',\n fileName='test_file_name',\n )\n\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=gen_processed_file.id\n )\n\n manifest = self.data_generator.create_database_genomic_manifest_file(\n manifestTypeId=2,\n filePath=f'test_file_path_{i}'\n )\n\n self.data_generator.create_database_genomic_manifest_feedback(\n inputManifestFileId=manifest.id,\n feedbackRecordCount=2\n )\n\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=participant.participantId,\n event_name='test_event',\n run_id=1,\n )\n\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=1,\n event_type='informing_loop_decision',\n module_type='gem',\n participant_id=participant.participantId,\n decision_value='maybe_later',\n event_authored_time=clock.CLOCK.now()\n )\n\n self.data_generator.create_database_genomic_cvl_past_due(\n cvl_site_id='co',\n email_notification_sent=0,\n sample_id='sample_test',\n results_type='hdr',\n genomic_set_member_id=gen_member.id\n )\n\n self.data_generator.create_database_genomic_appointment(\n message_record_id=i,\n appointment_id=i,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=participant.participantId,\n event_authored_time=clock.CLOCK.now(),\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n self.data_generator.create_database_genomic_member_report_state(\n genomic_set_member_id=gen_member.id,\n participant_id=participant.participantId,\n module='gem',\n genomic_report_state=GenomicReportState.GEM_RPT_READY,\n event_authored_time=clock.CLOCK.now()\n )\n\n self.data_generator.create_genomic_result_viewed(\n participant_id=participant.participantId,\n event_type='result_viewed',\n event_authored_time=clock.CLOCK.now(),\n module_type='gem',\n sample_id=gen_member.sampleId\n )\n\n # gets new records that were created with last job run from above\n with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:\n controller.reconcile_pdr_data()\n\n affected_tables = [\n 'genomic_set',\n 'genomic_set_member',\n 'genomic_job_run',\n 'genomic_file_processed',\n 'genomic_gc_validation_metrics',\n 'genomic_manifest_file',\n 'genomic_manifest_feedback',\n 'genomic_informing_loop',\n 'genomic_cvl_results_past_due',\n 'user_event_metrics',\n 'genomic_member_report_state',\n 'genomic_result_viewed',\n 'genomic_appointment_event'\n ]\n\n num_calls = len(affected_tables) + 1\n\n self.assertEqual(mock_cloud_task.call_count, num_calls)\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), num_calls)\n\n mock_tables = set([obj[0][0]['table'] for obj in call_args])\n mock_endpoint = [obj[0][1] for obj in call_args]\n\n self.assertTrue([mock_tables].sort() == affected_tables.sort())\n self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))\n\n @mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')\n def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):\n\n bucket_name = \"test-bucket\"\n aw1_file_name = \"AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv\"\n aw1_manifest_path 
= f\"{bucket_name}/{aw1_file_name}\"\n\n aw2_file_name = \"AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv\"\n aw2_manifest_path = f\"{bucket_name}/{aw2_file_name}\"\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n # Create AW1 job_run\n aw1_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n endTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n # Create AW2 job_run\n aw2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_INGESTION,\n startTime=clock.CLOCK.now(),\n endTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n # should have no data\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:\n controller.retry_manifest_ingestions()\n\n job_run = self.job_run_dao.get(3)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n\n # Create genomic_aw1_raw record\n self.data_generator.create_database_genomic_aw1_raw(\n file_path=aw1_manifest_path,\n package_id=\"PKG-2104-026571\",\n biobank_id=\"A10001\",\n )\n\n # Create genomic_aw2_raw record\n self.data_generator.create_database_genomic_aw2_raw(\n file_path=aw2_manifest_path,\n biobank_id=\"A10001\",\n sample_id=\"100001\",\n biobankidsampleid=\"A10001_100001\",\n )\n\n # Create AW1 genomic_manifest_file record\n aw1_manifest_file = self.data_generator.create_database_genomic_manifest_file(\n created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW1,\n filePath=aw1_manifest_path,\n fileName=aw1_file_name,\n bucketName=bucket_name,\n recordCount=1,\n rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now(),\n )\n\n # Create AW2 genomic_manifest_file record\n aw2_manifest_file = self.data_generator.create_database_genomic_manifest_file(\n created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n uploadDate=clock.CLOCK.now(),\n manifestTypeId=GenomicManifestTypes.AW2,\n filePath=aw2_manifest_path,\n fileName=aw2_file_name,\n bucketName=bucket_name,\n recordCount=1,\n rdrProcessingComplete=1,\n rdrProcessingCompleteDate=clock.CLOCK.now(),\n )\n\n # Create AW1 file_processed\n aw1_file_processed = self.data_generator.create_database_genomic_file_processed(\n runId=aw1_job_run.id,\n startTime=clock.CLOCK.now(),\n genomicManifestFileId=aw1_manifest_file.id,\n filePath=f\"/{aw1_manifest_path}\",\n bucketName=bucket_name,\n fileName=aw1_file_name,\n )\n\n # Create AW2 file_processed\n aw2_file_processed = self.data_generator.create_database_genomic_file_processed(\n runId=aw2_job_run.id,\n startTime=clock.CLOCK.now(),\n genomicManifestFileId=aw2_manifest_file.id,\n filePath=f\"/{aw2_manifest_path}\",\n bucketName=bucket_name,\n fileName=aw2_file_name,\n )\n\n # genomic_set_member for AW1\n gen_member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n biobankId=\"100153482\",\n sampleId=\"21042005280\",\n genomeType=\"aou_wgs\",\n genomicWorkflowState=GenomicWorkflowState.AW1,\n aw1FileProcessedId=aw1_file_processed.id\n )\n\n # genomic_gc_validation_metrics for AW1\n 
self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=gen_member.id,\n genomicFileProcessedId=aw2_file_processed.id\n )\n\n # one AW1/AW2 with no deltas\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:\n controller.retry_manifest_ingestions()\n\n job_run = self.job_run_dao.get(4)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)\n\n self.assertEqual(mock_cloud_task.call_count, 0)\n self.assertFalse(mock_cloud_task.call_count)\n\n # empty tables resulting in deltas and cloud task calls\n with self.member_dao.session() as session:\n session.query(GenomicGCValidationMetrics).delete()\n session.query(GenomicSetMember).delete()\n\n with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:\n controller.retry_manifest_ingestions()\n\n job_run = self.job_run_dao.get(5)\n self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)\n self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)\n self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)\n\n # one AW1/AW2 with deltas\n self.assertEqual(mock_cloud_task.call_count, 2)\n self.assertTrue(mock_cloud_task.call_count)\n\n call_args = mock_cloud_task.call_args_list\n self.assertEqual(len(call_args), 2)\n\n cloud_task_endpoint = ['ingest_aw1_manifest_task', 'ingest_aw2_manifest_task']\n mock_endpoint = [obj[0][1] for obj in call_args]\n self.assertTrue(all(obj for obj in mock_endpoint if obj == cloud_task_endpoint))\n\n mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])\n self.assertTrue(len(mock_buckets), 1)\n self.assertTrue(list(mock_buckets)[0] == bucket_name)\n\n def test_calculate_informing_loop_ready_flags(self):\n num_participants = 4\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n for num in range(num_participants):\n plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)\n plus_num = plus_num.replace(microsecond=0)\n with FakeClock(plus_num):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n stored_sample = self.data_generator.create_database_biobank_stored_sample(\n biobankId=summary.biobankId,\n biobankOrderIdentifier=self.fake.pyint()\n )\n collection_site = self.data_generator.create_database_site(\n siteType='Clinic'\n )\n order = self.data_generator.create_database_biobank_order(\n collectedSiteId=collection_site.siteId,\n participantId=summary.participantId,\n finalizedTime=plus_num\n )\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId,\n system=\"1\",\n )\n self.data_generator.create_database_biobank_order_identifier(\n value=stored_sample.biobankOrderIdentifier,\n biobankOrderId=order.biobankOrderId,\n system=\"2\",\n )\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS,\n qcStatus=GenomicQcStatus.PASS,\n gcManifestSampleSource='Whole Blood',\n collectionTubeId=stored_sample.biobankStoredSampleId\n )\n self.data_generator.create_database_genomic_gc_validation_metrics(\n genomicSetMemberId=member.id,\n sexConcordance='True',\n 
drcFpConcordance='Pass',\n drcSexConcordance='Pass',\n processingStatus='Pass'\n )\n\n\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), num_participants)\n\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is None for obj in current_set_members))\n\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:\n controller.calculate_informing_loop_ready_flags()\n\n # no config object, controller method should return\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), num_participants)\n\n calculation_limit = 2\n config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [calculation_limit])\n\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:\n controller.calculate_informing_loop_ready_flags()\n\n current_set_members = self.member_dao.get_all()\n self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in current_set_members))\n self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))\n\n current_loops_set = [obj for obj in current_set_members if obj.informingLoopReadyFlag == 1\n and obj.informingLoopReadyFlagModified is not None]\n self.assertEqual(len(current_loops_set), calculation_limit)\n\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), num_participants // 2)\n\n with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:\n controller.calculate_informing_loop_ready_flags()\n\n current_set_members = self.member_dao.get_all()\n self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in current_set_members))\n self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))\n\n members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()\n self.assertEqual(len(members_for_ready_loop), 0)\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_getting_results_withdrawn(self, email_mock):\n num_participants = 4\n result_withdrawal_dao = GenomicResultWithdrawalsDao()\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n gen_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.AW1_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n pids = []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT\n )\n\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY,\n gemA1ManifestJobRunId=gen_job_run.id if num % 2 == 0 else None\n )\n\n self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_WGS,\n cvlW1ilHdrJobRunId=gen_job_run.id\n )\n\n pids.append(summary.participantId)\n\n config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL, '[email protected]')\n\n with 
GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:\n controller.check_results_withdrawals()\n\n # mock checks should be two => 1 GEM 1 HEALTH\n self.assertEqual(email_mock.call_count, 2)\n call_args = email_mock.call_args_list\n\n self.assertTrue(any('GEM' in call.args[0].subject for call in call_args))\n self.assertTrue(any('HEALTH' in call.args[0].subject for call in call_args))\n\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)\n\n all_withdrawal_records = result_withdrawal_dao.get_all()\n\n self.assertTrue(len(all_withdrawal_records) == len(pids))\n self.assertTrue(all(obj.participant_id in pids for obj in all_withdrawal_records))\n\n array_results = list(filter(lambda x: x.array_results == 1, all_withdrawal_records))\n\n # should only be 2\n self.assertTrue(len(array_results), 2)\n\n cvl_results = list(filter(lambda x: x.cvl_results == 1, all_withdrawal_records))\n\n # should be 4 for num of participants\n self.assertTrue(len(cvl_results), num_participants)\n\n with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:\n controller.check_results_withdrawals()\n\n # mock checks should still be two on account of no records\n self.assertEqual(email_mock.call_count, 2)\n\n job_runs = self.job_run_dao.get_all()\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]\n\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)\n\n def test_gem_results_to_report_state(self):\n num_participants = 8\n\n gen_set = self.data_generator.create_database_genomic_set(\n genomicSetName=\".\",\n genomicSetCriteria=\".\",\n genomicSetVersion=1\n )\n\n gem_a2_job_run = self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.GEM_A2_MANIFEST,\n startTime=clock.CLOCK.now(),\n runResult=GenomicSubProcessResult.SUCCESS\n )\n\n pids_to_update, member_ids = [], []\n for num in range(num_participants):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1,\n withdrawalStatus=WithdrawalStatus.EARLY_OUT\n )\n\n member = self.data_generator.create_database_genomic_set_member(\n genomicSetId=gen_set.id,\n participantId=summary.participantId,\n genomeType=config.GENOME_TYPE_ARRAY\n )\n\n if num % 2 == 0:\n member_ids.append(member.id)\n pids_to_update.append(summary.participantId)\n\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 2)\n\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[0]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)\n\n current_members = self.member_dao.get_all()\n\n # 4 members updated correctly should return\n for member in current_members:\n if member.participantId in pids_to_update:\n member.gemA2ManifestJobRunId = gem_a2_job_run.id\n member.genomicWorkflowState = GenomicWorkflowState.GEM_RPT_READY\n self.member_dao.update(member)\n\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 3)\n\n current_job_run = 
list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[1]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)\n\n current_gem_report_states = self.report_state_dao.get_all()\n self.assertEqual(len(current_gem_report_states), len(pids_to_update))\n self.assertTrue(all(obj.event_type == 'result_ready' for obj in current_gem_report_states))\n self.assertTrue(all(obj.event_authored_time is not None for obj in current_gem_report_states))\n self.assertTrue(all(obj.module == 'gem' for obj in current_gem_report_states))\n self.assertTrue(\n all(obj.genomic_report_state == GenomicReportState.GEM_RPT_READY for obj in current_gem_report_states)\n )\n self.assertTrue(\n all(obj.genomic_report_state_str == GenomicReportState.GEM_RPT_READY.name for obj in\n current_gem_report_states)\n )\n self.assertTrue(\n all(obj.genomic_set_member_id in member_ids for obj in\n current_gem_report_states)\n )\n\n # 4 members inserted already should not return\n with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:\n controller.gem_results_to_report_state()\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 4)\n\n current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[2]\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)\n\n self.clear_table_after_test('genomic_member_report_state')\n\n def test_reconcile_informing_loop(self):\n event_dao = UserEventMetricsDao()\n event_dao.truncate() # for test suite\n il_dao = GenomicInformingLoopDao()\n\n for pid in range(8):\n self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)\n\n # Set up initial job run ID\n self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_FILE_INGEST,\n startTime=clock.CLOCK.now()\n )\n\n # create genomic set\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n # insert set members\n for b in [\"aou_array\", \"aou_wgs\"]:\n for i in range(1, 9):\n self.data_generator.create_database_genomic_set_member(\n participantId=i,\n genomicSetId=1,\n biobankId=i,\n collectionTubeId=100 + i,\n sampleId=10 + i,\n genomeType=b,\n )\n\n # Set up ingested metrics data\n events = ['gem.informing_loop.started',\n 'gem.informing_loop.screen8_no',\n 'gem.informing_loop.screen8_yes',\n 'hdr.informing_loop.started',\n 'gem.informing_loop.screen3',\n 'pgx.informing_loop.screen8_no',\n 'hdr.informing_loop.screen10_no']\n\n for p in range(4):\n for i in range(len(events)):\n self.data_generator.create_database_genomic_user_event_metrics(\n created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n participant_id=p + 1,\n created_at=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i),\n event_name=events[i],\n run_id=1,\n ignore_flag=0,\n )\n # Set up informing loop from message broker records\n decisions = [None, 'no', 'yes']\n for p in range(3):\n for i in range(2):\n self.data_generator.create_database_genomic_informing_loop(\n message_record_id=i,\n event_type='informing_loop_started' if i == 0 else 'informing_loop_decision',\n module_type='gem',\n participant_id=p + 1,\n decision_value=decisions[i],\n sample_id=100 + p,\n event_authored_time=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i)\n )\n\n # Test for no message but yes user event\n self.data_generator.create_database_genomic_user_event_metrics(\n 
created=clock.CLOCK.now(),\n modified=clock.CLOCK.now(),\n participant_id=6,\n created_at=datetime.datetime(2021, 12, 29, 00),\n event_name='gem.informing_loop.screen8_yes',\n run_id=1,\n ignore_flag=0,\n )\n\n # Run reconcile job\n genomic_pipeline.reconcile_informing_loop_responses()\n\n # Test mismatched GEM data ingested correctly\n pid_list = [1, 2, 3, 6]\n\n new_il_values = il_dao.get_latest_il_for_pids(\n pid_list=pid_list,\n module=\"gem\"\n )\n\n for value in new_il_values:\n self.assertEqual(\"yes\", value.decision_value)\n\n pid_list = [1, 2, 3, 4]\n for module in [\"hdr\", \"pgx\"]:\n new_il_values = il_dao.get_latest_il_for_pids(\n pid_list=pid_list,\n module=module\n )\n\n for value in new_il_values:\n self.assertEqual(\"no\", value.decision_value)\n self.assertIsNotNone(value.created_from_metric_id)\n\n def test_reconcile_message_broker_results_ready(self):\n # Create Test Participants' data\n # create genomic set\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n # Set up initial job run ID\n self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_FILE_INGEST,\n startTime=clock.CLOCK.now()\n )\n\n for pid in range(7):\n self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)\n\n # insert set members and event metrics records\n for i in range(1, 6):\n self.data_generator.create_database_genomic_set_member(\n participantId=i,\n genomicSetId=1,\n biobankId=i,\n collectionTubeId=100 + i,\n sampleId=10 + i,\n genomeType=\"aou_wgs\",\n )\n\n # 3 PGX records\n if i < 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"pgx.result_ready\",\n run_id=1,\n )\n\n # 1 HDR Positive\n if i == 4:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"hdr.result_ready.informative\",\n run_id=1,\n )\n\n # 1 HDR uninformative\n if i == 5:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"hdr.result_ready.uninformative\",\n run_id=1,\n )\n\n # Run job\n genomic_cvl_pipeline.reconcile_message_broker_results_ready()\n\n # Test correct data inserted\n report_state_dao = GenomicMemberReportStateDao()\n states = report_state_dao.get_all()\n\n self.assertEqual(5, len(states))\n\n pgx_records = [rec for rec in states if rec.module == \"pgx_v1\"]\n hdr_record_uninf = [rec for rec in states\n if rec.genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0]\n\n hdr_record_pos = [rec for rec in states\n if rec.genomic_report_state == GenomicReportState.HDR_RPT_POSITIVE][0]\n\n for pgx_record in pgx_records:\n self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.genomic_report_state)\n self.assertEqual(\"PGX_RPT_READY\", pgx_record.genomic_report_state_str)\n self.assertEqual(int(pgx_record.sample_id), pgx_record.participant_id + 10)\n self.assertEqual(\"result_ready\", pgx_record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), pgx_record.event_authored_time)\n self.assertIsNotNone(pgx_record.created_from_metric_id)\n\n self.assertEqual(\"HDR_RPT_UNINFORMATIVE\", hdr_record_uninf.genomic_report_state_str)\n self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.participant_id + 10)\n self.assertEqual(\"result_ready\", 
hdr_record_uninf.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_uninf.event_authored_time)\n self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)\n\n self.assertEqual(\"HDR_RPT_POSITIVE\", hdr_record_pos.genomic_report_state_str)\n self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.participant_id + 10)\n self.assertEqual(\"result_ready\", hdr_record_pos.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_pos.event_authored_time)\n self.assertIsNotNone(hdr_record_pos.created_from_metric_id)\n\n def test_reconcile_message_broker_results_viewed(self):\n # Create Test Participants' data\n # create genomic set\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n # Set up initial job run ID\n self.data_generator.create_database_genomic_job_run(\n jobId=GenomicJob.METRICS_FILE_INGEST,\n startTime=clock.CLOCK.now()\n )\n\n for pid in range(3):\n self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)\n\n # insert set members and event metrics records\n for i in range(1, 3):\n self.data_generator.create_database_genomic_set_member(\n participantId=i,\n genomicSetId=1,\n biobankId=i,\n collectionTubeId=100 + i,\n sampleId=10 + i,\n genomeType=\"aou_wgs\",\n )\n\n # 1 PGX Viewed\n if i == 1:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"pgx.opened_at\",\n run_id=1,\n )\n\n # 1 HDR Viewed\n if i == 2:\n self.data_generator.create_database_genomic_user_event_metrics(\n participant_id=i,\n created_at=datetime.datetime(2022, 10, 6, 00),\n event_name=\"hdr.opened_at\",\n run_id=1,\n )\n\n genomic_cvl_pipeline.reconcile_message_broker_results_viewed()\n\n # Test correct data inserted\n result_viewed_dao = GenomicResultViewedDao()\n results = result_viewed_dao.get_all()\n\n self.assertEqual(2, len(results))\n\n for record in results:\n if record.participant_id == 1:\n self.assertEqual(\"pgx_v1\", record.module_type)\n else:\n self.assertEqual(\"hdr_v1\", record.module_type)\n self.assertEqual(int(record.sample_id), record.participant_id + 10)\n self.assertEqual(\"result_viewed\", record.event_type)\n self.assertEqual(datetime.datetime(2022, 10, 6, 00), record.first_viewed)\n self.assertIsNotNone(record.created_from_metric_id)\n\n def test_ingest_appointment_metrics_file(self):\n test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'\n bucket_name = 'test_bucket'\n sub_folder = 'appointment_events'\n pids = []\n\n for _ in range(4):\n summary = self.data_generator.create_database_participant_summary()\n pids.append(summary.participantId)\n\n test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'\n\n appointment_data = test_data.load_test_data_json(\n \"Genomic-Metrics-File-Appointment-Events-Test.json\")\n appointment_data_str = json.dumps(appointment_data, indent=4)\n\n with open_cloud_file(test_file_path, mode='wb') as cloud_file:\n cloud_file.write(appointment_data_str.encode(\"utf-8\"))\n\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST) as controller:\n controller.ingest_appointment_metrics_file(\n file_path=test_file_path,\n )\n\n all_metrics = self.appointment_metrics_dao.get_all()\n\n # should be 5 metric records for whats in json file\n self.assertEqual(len(all_metrics), 5)\n self.assertTrue(all((obj.participant_id in pids for obj in all_metrics)))\n 
self.assertTrue(all((obj.file_path == test_file_path for obj in all_metrics)))\n self.assertTrue(all((obj.appointment_event is not None for obj in all_metrics)))\n self.assertTrue(all((obj.created is not None for obj in all_metrics)))\n self.assertTrue(all((obj.modified is not None for obj in all_metrics)))\n self.assertTrue(all((obj.module_type is not None for obj in all_metrics)))\n self.assertTrue(all((obj.event_authored_time is not None for obj in all_metrics)))\n self.assertTrue(all((obj.event_type is not None for obj in all_metrics)))\n\n current_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(current_job_runs), 1)\n\n current_job_run = current_job_runs[0]\n self.assertTrue(current_job_run.jobId == GenomicJob.APPOINTMENT_METRICS_FILE_INGEST)\n self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)\n\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n def test_reconcile_appointments_with_metrics(self):\n fake_date = parser.parse('2020-05-29T08:00:01-05:00')\n\n for num in range(4):\n summary = self.data_generator.create_database_participant_summary()\n\n missing_json = {\n \"event\": \"appointment_updated\",\n \"eventAuthoredTime\": \"2022-09-16T17:18:38Z\",\n \"participantId\": f'P{summary.participantId}',\n \"messageBody\": {\n \"module_type\": \"hdr\",\n \"appointment_timestamp\": \"2022-09-19T19:30:00+00:00\",\n \"id\": 55,\n \"appointment_timezone\": \"America/Los_Angeles\",\n \"location\": \"CA\",\n \"contact_number\": \"18043704252\",\n \"language\": \"en\",\n \"source\": \"Color\"\n }\n }\n\n if num % 2 == 0:\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num,\n appointment_id=num,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n self.data_generator.create_database_genomic_appointment_metric(\n participant_id=summary.participantId,\n appointment_event=json.dumps(missing_json, indent=4) if num % 2 != 0 else 'foo',\n file_path='test_file_path',\n module_type='hdr',\n event_authored_time=fake_date,\n event_type='appointment_updated' if num % 2 != 0 else 'appointment_scheduled'\n )\n\n current_events = self.appointment_event_dao.get_all()\n # should be 2 initial appointment events\n self.assertEqual(len(current_events), 2)\n\n current_metrics = self.appointment_metrics_dao.get_all()\n # should be 4 initial appointment events\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is None for obj in current_metrics))\n\n with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE) as controller:\n controller.reconcile_appointment_events_from_metrics()\n\n job_run = self.job_run_dao.get_all()\n self.assertEqual(len(job_run), 1)\n self.assertTrue(job_run[0].jobId == GenomicJob.APPOINTMENT_METRICS_RECONCILE)\n\n current_events = self.appointment_event_dao.get_all()\n # should be 4 appointment events 2 initial + 2 added\n self.assertEqual(len(current_events), 4)\n\n scheduled = list(filter(lambda x: x.event_type == 'appointment_scheduled', current_events))\n self.assertEqual(len(scheduled), 2)\n self.assertTrue(all(obj.created_from_metric_id is None for obj in scheduled))\n\n updated = list(filter(lambda x: x.event_type == 'appointment_updated', current_events))\n 
self.assertEqual(len(updated), 2)\n self.assertTrue(all(obj.created_from_metric_id is not None for obj in updated))\n\n current_metrics = self.appointment_metrics_dao.get_all()\n # should STILL be 4 initial appointment events\n self.assertEqual(len(current_metrics), 4)\n self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in current_metrics))\n self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for obj in current_metrics))\n\n self.clear_table_after_test('genomic_appointment_event_metrics')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_appointments_gror_changed(self, email_mock):\n fake_date = parser.parse(\"2022-09-01T13:43:23\")\n notified_dao = GenomicAppointmentEventNotifiedDao()\n config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, ['[email protected]'])\n num_participants = 4\n for num in range(num_participants):\n gror = num if num > 1 else 1\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=gror\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=num,\n appointment_id=num,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()\n self.assertEqual(2, len(changed_ppts))\n with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED) as controller:\n controller.check_appointments_gror_changed()\n\n self.assertEqual(email_mock.call_count, 1)\n notified_appointments = notified_dao.get_all()\n self.assertEqual(2, len(notified_appointments))\n\n # test notified not returned by query\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=2\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=5,\n appointment_id=5,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=summary.participantId,\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()\n self.assertEqual(1, len(changed_ppts))\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation(self, email_mock):\n fake_date = parser.parse(\"2022-09-01T13:43:23\")\n fake_date2 = parser.parse(\"2022-09-02T14:14:00\")\n fake_date3 = parser.parse(\"2022-09-03T15:15:00\")\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n set_member = self.data_generator.create_database_genomic_set_member(\n participantId=summary.participantId,\n genomicSetId=1,\n biobankId=1001,\n collectionTubeId=100,\n sampleId=10,\n genomeType=\"aou_wgs\",\n )\n 
self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId,\n genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,\n genomic_set_member_id=set_member.id,\n module='hdr_v1',\n event_authored_time=fake_date\n )\n pids.append(summary.participantId)\n\n # Appointment scheduled in future: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101,\n appointment_id=102,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[0],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment completed: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102,\n appointment_id=103,\n event_type='appointment_completed',\n module_type='hdr',\n participant_id=pids[1],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=fake_date,\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment scheduled then canceled: notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103,\n appointment_id=104,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date2,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104,\n appointment_id=104,\n event_type='appointment_cancelled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date3,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{\n 'participant_id': pids[4],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': True\n },{\n 'participant_id': pids[5],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': False\n }])\n\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(num_days=14)\n results = [pid[0] for pid in escalated_participants]\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:\n controller.check_gcr_escalation(controller.job_id)\n\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 14 Day Escalation')\n\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_14day_escalation_error(self, email_mock):\n email_mock.side_effect = ForbiddenError(mock.Mock(code=403))\n mock_slack_handler = mock.MagicMock()\n\n fake_date = 
parser.parse(\"2023-06-01T13:43:23\")\n\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n\n pids = []\n for _ in range(2):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n set_member = self.data_generator.create_database_genomic_set_member(\n participantId=summary.participantId,\n genomicSetId=1,\n biobankId=1001,\n collectionTubeId=100,\n sampleId=10,\n genomeType=\"aou_wgs\",\n )\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId,\n genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,\n genomic_set_member_id=set_member.id,\n module='hdr_v1',\n event_authored_time=fake_date\n )\n pids.append(summary.participantId)\n\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102,\n appointment_id=103,\n event_type='appointment_completed',\n module_type='hdr',\n participant_id=pids[1],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=fake_date,\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:\n controller.genomic_alert_slack = mock_slack_handler\n controller.check_gcr_escalation(controller.job_id)\n\n notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)\n with notified_dao.session() as session:\n notification = session.query(\n GenomicGCROutreachEscalationNotified\n ).filter(\n GenomicGCROutreachEscalationNotified.participant_id == pids[0]\n ).one()\n\n self.assertEqual(email_mock.call_count, 1)\n self.assertEqual(mock_slack_handler.send_message_to_webhook.call_count, 1)\n self.assertEqual(False, notification.message_sent)\n\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch('rdr_service.services.email_service.EmailService.send_email')\n def test_check_gcr_ce_escalation(self, email_mock):\n fake_date = parser.parse(\"2022-09-01T13:43:23\")\n fake_date2 = parser.parse(\"2022-09-02T14:14:00\")\n fake_date3 = parser.parse(\"2022-09-03T15:15:00\")\n config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])\n self.data_generator.create_database_genomic_set(\n genomicSetName='test',\n genomicSetCriteria='.',\n genomicSetVersion=1\n )\n pids = []\n for _ in range(6):\n summary = self.data_generator.create_database_participant_summary(\n consentForStudyEnrollment=1,\n consentForGenomicsROR=1\n )\n set_member = self.data_generator.create_database_genomic_set_member(\n participantId=summary.participantId,\n genomicSetId=1,\n biobankId=1001,\n collectionTubeId=100,\n sampleId=10,\n genomeType=\"aou_wgs\",\n participantOrigin='careevolution'\n )\n self.data_generator.create_database_genomic_member_report_state(\n participant_id=summary.participantId,\n genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,\n genomic_set_member_id=set_member.id,\n module='hdr_v1',\n event_authored_time=fake_date\n )\n pids.append(summary.participantId)\n\n # Appointment scheduled in future: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=101,\n appointment_id=102,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[0],\n 
event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment completed: don't notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=102,\n appointment_id=103,\n event_type='appointment_completed',\n module_type='hdr',\n participant_id=pids[1],\n event_authored_time=fake_date,\n source='Color',\n appointment_timestamp=fake_date,\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n # Appointment scheduled then canceled: notify\n self.data_generator.create_database_genomic_appointment(\n message_record_id=103,\n appointment_id=104,\n event_type='appointment_scheduled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date2,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n self.data_generator.create_database_genomic_appointment(\n message_record_id=104,\n appointment_id=104,\n event_type='appointment_cancelled',\n module_type='hdr',\n participant_id=pids[2],\n event_authored_time=fake_date3,\n source='Color',\n appointment_timestamp=format_datetime(clock.CLOCK.now()),\n appointment_timezone='America/Los_Angeles',\n location='123 address st',\n contact_number='17348675309',\n language='en'\n )\n\n notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)\n notified_dao.insert_bulk([{\n 'participant_id': pids[4],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': True\n },{\n 'participant_id': pids[5],\n 'created': clock.CLOCK.now(),\n 'modified': clock.CLOCK.now(),\n 'message_sent': False\n }])\n\n with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):\n escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(\n num_days=30,\n participant_origin='careevolution'\n )\n results = [pid[0] for pid in escalated_participants]\n\n self.assertIn(pids[2], results)\n self.assertIn(pids[3], results)\n self.assertIn(pids[5], results)\n self.assertNotIn(pids[0], results)\n self.assertNotIn(pids[1], results)\n self.assertNotIn(pids[4], results)\n\n with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION) as controller:\n controller.check_gcr_escalation(controller.job_id)\n\n self.assertEqual(email_mock.call_count, 3)\n self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 30 Day Escalation')\n\n self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')\n\n @mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')\n def test_execute_auto_generation_from_last_run(self, cloud_task_mock):\n\n with GenomicJobController(\n GenomicJob.PR_PR_WORKFLOW\n ) as controller:\n controller.job_result = GenomicSubProcessResult.ERROR\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR)\n\n # task SHOULD NOT be called\n self.assertEqual(cloud_task_mock.called, False)\n self.assertEqual(cloud_task_mock.call_count, 0)\n\n 
with GenomicJobController(\n GenomicJob.PR_PR_WORKFLOW\n ) as controller:\n controller.job_result = GenomicSubProcessResult.SUCCESS\n controller._end_run()\n controller.execute_auto_generation_from_cloud_task()\n\n last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)\n self.assertTrue(last_job_run_status is not None)\n self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.SUCCESS)\n\n # task SHOULD be called\n self.assertEqual(cloud_task_mock.called, True)\n self.assertTrue(cloud_task_mock.call_args[1].get('payload').get('manifest_type') == 'p0')\n self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') == 'genomic-generate-manifest')\n\n all_job_runs = self.job_run_dao.get_all()\n self.assertEqual(len(all_job_runs), 2)\n self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.SUCCESS, GenomicSubProcessResult.ERROR] for obj\n in all_job_runs))\n self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in all_job_runs))\n\n",
"step-ids": [
9,
13,
17,
22,
25
]
}
|
[
9,
13,
17,
22,
25
] |
#!/usr/bin/python
class Bob(object):
def __init__(self):
self.question_response = "Sure."
self.yell_response = "Woah, chill out!"
self.silent_response = "Fine. Be that way!"
self.whatever = "Whatever."
def hey(self, question):
if not(question) or question.strip()=='':
return self.silent_response
if question.isupper():
return self.yell_response
elif question.endswith("?"):
return self.question_response
return self.whatever
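
# A minimal, illustrative check of the response rules above (added for clarity,
# not part of the original exercise file; the sample questions are made up):
if __name__ == "__main__":
    bob = Bob()
    assert bob.hey("Do you like maths?") == "Sure."
    assert bob.hey("WATCH OUT!") == "Woah, chill out!"
    assert bob.hey("   ") == "Fine. Be that way!"
    assert bob.hey("Tom-ay-to, tom-aaaah-to.") == "Whatever."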
|
normal
|
{
"blob_id": "7ff7da216bdda5c30bf7c973c82886035b31247c",
"index": 4093,
"step-1": "<mask token>\n",
"step-2": "class Bob(object):\n <mask token>\n <mask token>\n",
"step-3": "class Bob(object):\n <mask token>\n\n def hey(self, question):\n if not question or question.strip() == '':\n return self.silent_response\n if question.isupper():\n return self.yell_response\n elif question.endswith('?'):\n return self.question_response\n return self.whatever\n",
"step-4": "class Bob(object):\n\n def __init__(self):\n self.question_response = 'Sure.'\n self.yell_response = 'Woah, chill out!'\n self.silent_response = 'Fine. Be that way!'\n self.whatever = 'Whatever.'\n\n def hey(self, question):\n if not question or question.strip() == '':\n return self.silent_response\n if question.isupper():\n return self.yell_response\n elif question.endswith('?'):\n return self.question_response\n return self.whatever\n",
"step-5": "#!/usr/bin/python\n\nclass Bob(object):\n def __init__(self):\n self.question_response = \"Sure.\"\n self.yell_response = \"Woah, chill out!\"\n self.silent_response = \"Fine. Be that way!\"\n self.whatever = \"Whatever.\"\n\n def hey(self, question):\n if not(question) or question.strip()=='':\n return self.silent_response\n if question.isupper():\n return self.yell_response\n elif question.endswith(\"?\"):\n return self.question_response\n return self.whatever\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Utilities for calculations based on antenna positions,
such as baseline and phase factor.
"""
import os
import numpy as np
import pickle
c = 299792458 # m / s
data_prefix = os.path.dirname(os.path.abspath(__file__)) + "/"
try:
ant_pos = dict(pickle.load(open(data_prefix + "ant_dict.pk", "rb")))
def baselength(ant_ID1, ant_ID2):
"""
(Convenience function)
Return the norm of the baseline between antennae
# @ant_ID1 and @ant_ID2
"""
return np.linalg.norm(baseline(ant_ID1, ant_ID2))
def baseline(ant_ID1, ant_ID2):
"""
Calculate the baseline between antennae
# @ant_ID1 and @ant_ID2
by a simple difference of their coordinates.
"""
return ant_pos[ant_ID2] - ant_pos[ant_ID1]
def phase_factor(ant1, ant2, r, nu=151e6):
"""
Calculate the phase factor in the direction @r (l, m)
(we assume that n is of insignificant magnitude)
and at the frequency @nu
between two antennae whose ID #s are @ant1 and @ant2.
When we calculate the baseline (u, v, w), we
assume that w is of insignificant magnitude.
"""
b = baseline(ant1, ant2)[0:2] # kill w
br = np.dot(b, r)
return np.exp(-2j * np.pi * nu * br / c)
except FileNotFoundError:
print("Failure to load antennae data.")
|
normal
|
{
"blob_id": "c455263b82c04fe2c5cc1e614f10a9962795f87e",
"index": 4349,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151000000.0):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2]\n br = np.dot(b, r)\n return np.exp(-2.0j * np.pi * nu * br / c)\nexcept FileNotFoundError:\n print('Failure to load antennae data.')\n",
"step-3": "<mask token>\nc = 299792458\ndata_prefix = os.path.dirname(os.path.abspath(__file__)) + '/'\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151000000.0):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2]\n br = np.dot(b, r)\n return np.exp(-2.0j * np.pi * nu * br / c)\nexcept FileNotFoundError:\n print('Failure to load antennae data.')\n",
"step-4": "<mask token>\nimport os\nimport numpy as np\nimport pickle\nc = 299792458\ndata_prefix = os.path.dirname(os.path.abspath(__file__)) + '/'\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151000000.0):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2]\n br = np.dot(b, r)\n return np.exp(-2.0j * np.pi * nu * br / c)\nexcept FileNotFoundError:\n print('Failure to load antennae data.')\n",
"step-5": "\"\"\"\nUtilities for calculations based on antenna positions,\nsuch as baseline and phase factor.\n\"\"\"\n\nimport os\nimport numpy as np\nimport pickle\n\nc = 299792458 # m / s\ndata_prefix = os.path.dirname(os.path.abspath(__file__)) + \"/\"\n\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + \"ant_dict.pk\", \"rb\")))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151e6):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2] # kill w\n\n br = np.dot(b, r)\n return np.exp(-2j * np.pi * nu * br / c)\n \nexcept FileNotFoundError:\n print(\"Failure to load antennae data.\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# python2.7
#formats for oracle lists
import pyperclip
text = str(pyperclip.paste()).strip()
lines = text.split('\n')
for i in range(len(lines)):
if (i+1) < len(lines):
lines[i] = str('\'')+str(lines[i]).replace("\r","").replace("\n","") + str('\',')
elif (i+1) == len(lines):
lines[i] = str('\'')+str(lines[i]).replace("\r","").replace("\n","")+ '\''
text = '(' + '\n'.join(lines) + ')'
pyperclip.copy(text)
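
# Illustrative before/after, assuming the clipboard held one value per line:
#   clipboard in : "A123\nB456\nC789"
#   clipboard out: "('A123',\n'B456',\n'C789')"  # ready to paste into an Oracle IN (...) list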
|
normal
|
{
"blob_id": "454fd88af552d7a46cb39167f21d641420973959",
"index": 2312,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(lines)):\n if i + 1 < len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + str(\"',\")\n elif i + 1 == len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + \"'\"\n<mask token>\npyperclip.copy(text)\n",
"step-3": "<mask token>\ntext = str(pyperclip.paste()).strip()\nlines = text.split('\\n')\nfor i in range(len(lines)):\n if i + 1 < len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + str(\"',\")\n elif i + 1 == len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + \"'\"\ntext = '(' + '\\n'.join(lines) + ')'\npyperclip.copy(text)\n",
"step-4": "import pyperclip\ntext = str(pyperclip.paste()).strip()\nlines = text.split('\\n')\nfor i in range(len(lines)):\n if i + 1 < len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + str(\"',\")\n elif i + 1 == len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + \"'\"\ntext = '(' + '\\n'.join(lines) + ')'\npyperclip.copy(text)\n",
"step-5": "# python2.7\r\n#formats for oracle lists\r\n\r\nimport pyperclip\r\ntext = str(pyperclip.paste()).strip()\r\n\r\nlines = text.split('\\n')\r\nfor i in range(len(lines)):\r\n if (i+1) < len(lines):\r\n lines[i] = str('\\'')+str(lines[i]).replace(\"\\r\",\"\").replace(\"\\n\",\"\") + str('\\',')\r\n elif (i+1) == len(lines):\r\n lines[i] = str('\\'')+str(lines[i]).replace(\"\\r\",\"\").replace(\"\\n\",\"\")+ '\\''\r\ntext = '(' + '\\n'.join(lines) + ')'\r\n\r\npyperclip.copy(text)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class User():
def __init__(self, first, last, gender, age):
self.first_name = first
self.last_name = last
self.gender = gender
self.age = age
self.full_name = self.first_name + " " + self.last_name
def describe_user(self):
print("The name of the user is " + self.full_name + ".")
print("The user's gender is " + self.gender + ".")
print("The user is " + str(self.age) + " years old.")
def greet_user(self):
if self.gender.lower() == "male":
print("Greetings, Mr. " + self.last_name.title() + "!")
elif self.gender.lower() == "female":
print("Greetings, Miss " + self.last_name.title() + "!")
user1 = User("zhichao", "li", "male", 27)
user2 = User("juan", "zhang", "female", 28)
user3 = User("Tian", "ZHANG", "male", 26)
user1.describe_user()
user1.greet_user()
user2.describe_user()
user2.greet_user()
user3.describe_user()
user3.greet_user()
|
normal
|
{
"blob_id": "93b712c60ba4bfa81d967ec59035b6fb7793ce87",
"index": 1974,
"step-1": "class User:\n <mask token>\n <mask token>\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\n<mask token>\n",
"step-2": "class User:\n <mask token>\n\n def describe_user(self):\n print('The name of the user is ' + self.full_name + '.')\n print(\"The user's gender is \" + self.gender + '.')\n print('The user is ' + str(self.age) + ' years old.')\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\n<mask token>\n",
"step-3": "class User:\n\n def __init__(self, first, last, gender, age):\n self.first_name = first\n self.last_name = last\n self.gender = gender\n self.age = age\n self.full_name = self.first_name + ' ' + self.last_name\n\n def describe_user(self):\n print('The name of the user is ' + self.full_name + '.')\n print(\"The user's gender is \" + self.gender + '.')\n print('The user is ' + str(self.age) + ' years old.')\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\n<mask token>\n",
"step-4": "class User:\n\n def __init__(self, first, last, gender, age):\n self.first_name = first\n self.last_name = last\n self.gender = gender\n self.age = age\n self.full_name = self.first_name + ' ' + self.last_name\n\n def describe_user(self):\n print('The name of the user is ' + self.full_name + '.')\n print(\"The user's gender is \" + self.gender + '.')\n print('The user is ' + str(self.age) + ' years old.')\n\n def greet_user(self):\n if self.gender.lower() == 'male':\n print('Greetings, Mr. ' + self.last_name.title() + '!')\n elif self.gender.lower() == 'female':\n print('Greetings, Miss ' + self.last_name.title() + '!')\n\n\nuser1 = User('zhichao', 'li', 'male', 27)\nuser2 = User('juan', 'zhang', 'female', 28)\nuser3 = User('Tian', 'ZHANG', 'male', 26)\nuser1.describe_user()\nuser1.greet_user()\nuser2.describe_user()\nuser2.greet_user()\nuser3.describe_user()\nuser3.greet_user()\n",
"step-5": "class User():\n def __init__(self, first, last, gender, age):\n self.first_name = first\n self.last_name = last\n self.gender = gender\n self.age = age\n self.full_name = self.first_name + \" \" + self.last_name\n\n def describe_user(self):\n print(\"The name of the user is \" + self.full_name + \".\")\n print(\"The user's gender is \" + self.gender + \".\")\n print(\"The user is \" + str(self.age) + \" years old.\")\n\n def greet_user(self):\n if self.gender.lower() == \"male\":\n print(\"Greetings, Mr. \" + self.last_name.title() + \"!\")\n elif self.gender.lower() == \"female\":\n print(\"Greetings, Miss \" + self.last_name.title() + \"!\")\n\n\nuser1 = User(\"zhichao\", \"li\", \"male\", 27)\nuser2 = User(\"juan\", \"zhang\", \"female\", 28)\nuser3 = User(\"Tian\", \"ZHANG\", \"male\", 26)\n\nuser1.describe_user()\nuser1.greet_user()\nuser2.describe_user()\nuser2.greet_user()\nuser3.describe_user()\nuser3.greet_user()\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
from django.http import HttpResponsePermanentRedirect
from django.urls import is_valid_path
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import escape_leading_slashes
class AppendSlashMiddleware(MiddlewareMixin):
response_redirect_class = HttpResponsePermanentRedirect
def process_request(self, request):
redirect_url = ''
if self.should_redirect_with_slash(request):
path = self.get_full_path_with_slash(request)
else:
path = request.get_full_path()
if redirect_url or path != request.get_full_path():
redirect_url += path
return self.response_redirect_class(redirect_url)
def should_redirect_with_slash(self, request):
if request.path_info.endswith('/'):
return False
urlconf = getattr(request, 'urlconf', None)
return (
not is_valid_path(request.path_info, urlconf) and
is_valid_path('%s/' % request.path_info, urlconf)
)
def get_full_path_with_slash(self, request):
new_path = request.get_full_path(force_append_slash=True)
return escape_leading_slashes(new_path)
def process_response(self, request, response):
if response.status_code == 404:
if self.should_redirect_with_slash(request):
return self.response_redirect_class(
self.get_full_path_with_slash(request))
if not response.streaming and \
not response.has_header('Content-Length'):
response['Content-Length'] = str(len(response.content))
return response
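
# Usage note (sketch): to enable this middleware in a Django project, add its
# dotted path to the MIDDLEWARE list in settings.py; the module path below is
# an assumption, not part of this file:
#
#   MIDDLEWARE = [
#       # ...
#       'myproject.middleware.AppendSlashMiddleware',
#   ]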
|
normal
|
{
"blob_id": "ec70fb9119b430dcd36549f2fac8e5e0a0e1bb00",
"index": 2696,
"step-1": "<mask token>\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n <mask token>\n\n def process_request(self, request):\n redirect_url = ''\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n <mask token>\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(self.\n get_full_path_with_slash(request))\n if not response.streaming and not response.has_header('Content-Length'\n ):\n response['Content-Length'] = str(len(response.content))\n return response\n",
"step-2": "<mask token>\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n <mask token>\n\n def process_request(self, request):\n redirect_url = ''\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n\n def should_redirect_with_slash(self, request):\n if request.path_info.endswith('/'):\n return False\n urlconf = getattr(request, 'urlconf', None)\n return not is_valid_path(request.path_info, urlconf) and is_valid_path(\n '%s/' % request.path_info, urlconf)\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(self.\n get_full_path_with_slash(request))\n if not response.streaming and not response.has_header('Content-Length'\n ):\n response['Content-Length'] = str(len(response.content))\n return response\n",
"step-3": "<mask token>\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n response_redirect_class = HttpResponsePermanentRedirect\n\n def process_request(self, request):\n redirect_url = ''\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n\n def should_redirect_with_slash(self, request):\n if request.path_info.endswith('/'):\n return False\n urlconf = getattr(request, 'urlconf', None)\n return not is_valid_path(request.path_info, urlconf) and is_valid_path(\n '%s/' % request.path_info, urlconf)\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(self.\n get_full_path_with_slash(request))\n if not response.streaming and not response.has_header('Content-Length'\n ):\n response['Content-Length'] = str(len(response.content))\n return response\n",
"step-4": "from django.http import HttpResponsePermanentRedirect\nfrom django.urls import is_valid_path\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.utils.http import escape_leading_slashes\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n response_redirect_class = HttpResponsePermanentRedirect\n\n def process_request(self, request):\n redirect_url = ''\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n\n def should_redirect_with_slash(self, request):\n if request.path_info.endswith('/'):\n return False\n urlconf = getattr(request, 'urlconf', None)\n return not is_valid_path(request.path_info, urlconf) and is_valid_path(\n '%s/' % request.path_info, urlconf)\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(self.\n get_full_path_with_slash(request))\n if not response.streaming and not response.has_header('Content-Length'\n ):\n response['Content-Length'] = str(len(response.content))\n return response\n",
"step-5": "\nfrom django.http import HttpResponsePermanentRedirect\nfrom django.urls import is_valid_path\nfrom django.utils.deprecation import MiddlewareMixin\nfrom django.utils.http import escape_leading_slashes\n\n\nclass AppendSlashMiddleware(MiddlewareMixin):\n response_redirect_class = HttpResponsePermanentRedirect\n\n def process_request(self, request):\n redirect_url = ''\n\n if self.should_redirect_with_slash(request):\n path = self.get_full_path_with_slash(request)\n else:\n path = request.get_full_path()\n\n if redirect_url or path != request.get_full_path():\n redirect_url += path\n return self.response_redirect_class(redirect_url)\n\n def should_redirect_with_slash(self, request):\n if request.path_info.endswith('/'):\n return False\n\n urlconf = getattr(request, 'urlconf', None)\n return (\n not is_valid_path(request.path_info, urlconf) and\n is_valid_path('%s/' % request.path_info, urlconf)\n )\n\n def get_full_path_with_slash(self, request):\n new_path = request.get_full_path(force_append_slash=True)\n return escape_leading_slashes(new_path)\n\n def process_response(self, request, response):\n if response.status_code == 404:\n if self.should_redirect_with_slash(request):\n return self.response_redirect_class(\n self.get_full_path_with_slash(request))\n\n if not response.streaming and \\\n not response.has_header('Content-Length'):\n response['Content-Length'] = str(len(response.content))\n\n return response\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Original code from http://www.pythonforbeginners.com/code-snippets-source-code/port-scanner-in-python
#!/usr/bin/env python
# modules
import threading
import socket
import subprocess
import sys
import time
import scapy
from threading import Thread, Lock
from queue import Queue
from datetime import datetime
from logging import getLogger, ERROR
getLogger("scapy.runtime") .setLevel (ERROR)
from scapy.all import *
subprocess.call('clear', shell=True)
# print_lock = threading.Lock() - WIP, threading not implemented yet.
# Enter target host and port range
target = input("Enter a remote host to scan: ")
targetIP = socket.gethostbyname(target)
startPort = int(input("Enter the start port to scan: "))
endPort = int(input("Enter the end port to scan: "))
# Setting some values
ports = range(int(startPort), int(endPort)+1)
t1 = datetime.now()
SYNACK = 0x12
RSTACK = 0x14
# Banner displaying which host is being scanned
print ("-" * 60)
print ("Please wait, scanning remote host...", targetIP)
localtime = time.asctime(time.localtime())
print ("Scan started at: ", localtime)
def checkhost(ip):
conf.verb = 0
try:
ping = sr1(IP(dst = ip)/ICMP())
print ("\n[*] Target is up, beginning scan...") #this text isn't displayed - why?
except Exception:
print ("\n[!] Couldn't resolve target")
sys.exit("Exiting...")
print ("-" * 60)
def scanport(port):
    srcPort = RandShort() # scapy func that generates a small random nr to use as a source port.
    conf.verb = 0 # prevents output from sending pkts from being printed to the screen.
    SYNACKpkt = sr1(IP(dst = target)/TCP(sport = srcPort, dport = port, flags = "S")) # Scapy func sr1() used to craft & send a SYN pkt.
    pktflags = SYNACKpkt.getlayer(TCP).flags
    # tear down the half-open connection before returning
    RSTpkt = IP(dst = target)/TCP(sport = srcPort, dport = port, flags = "R")
    send(RSTpkt)
    if pktflags == SYNACK:
        return True
    else:
        return False
# Error handling
try:
for port in range(int(startPort), int(endPort)+1):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((targetIP, port))
if result == 0:
print ("Port {}: [+] Open".format(port))
elif result != 0:
print ("Port {}: [-] Closed".format(port))
sock.close()
except KeyboardInterrupt:
sys.exit("You pressed Ctrl+C")
except socket.gaierror:
sys.exit("Hostname could not be resolved. Exiting")
except socket.error:
sys.exit("Couldn't connect to server")
t2 = datetime.now()
# Calculates the difference in time, to see how long it took to run the script
total = t2 - t1
print ("-" * 60)
print ("Scanning Completed in: ", total)
|
normal
|
{
"blob_id": "7e0eefb1d913787f675adc2ba0dccb16007464e4",
"index": 1764,
"step-1": "<mask token>\n\n\ndef checkhost(ip):\n conf.verb = 0\n try:\n ping = sr1(IP(dst=ip) / ICMP())\n print('\\n[*] Target is up, beginning scan...')\n except Exception:\n print(\"\\n[!] Couldn't resolve target\")\n sys.exit('Exiting...')\n\n\n<mask token>\n\n\ndef scanport(port):\n startPort = RandShort()\n conf.verb = 0\n SYNACKpkt = sr1(IP(dst=target) / TCP(sport=startPort, endPort=port,\n flags='S'))\n pktflags = SYNACKpkt.getlayer(TCP).flags\n if pktflags == SYNACK:\n return True\n else:\n return False\n RSTpkt = IP(dst=target) / TCP(sport=startPort, endPort=port, flags='R')\n send(RSTpkt)\n\n\n<mask token>\n",
"step-2": "<mask token>\ngetLogger('scapy.runtime').setLevel(ERROR)\n<mask token>\nsubprocess.call('clear', shell=True)\n<mask token>\nprint('-' * 60)\nprint('Please wait, scanning remote host...', targetIP)\n<mask token>\nprint('Scan started at: ', localtime)\n\n\ndef checkhost(ip):\n conf.verb = 0\n try:\n ping = sr1(IP(dst=ip) / ICMP())\n print('\\n[*] Target is up, beginning scan...')\n except Exception:\n print(\"\\n[!] Couldn't resolve target\")\n sys.exit('Exiting...')\n\n\nprint('-' * 60)\n\n\ndef scanport(port):\n startPort = RandShort()\n conf.verb = 0\n SYNACKpkt = sr1(IP(dst=target) / TCP(sport=startPort, endPort=port,\n flags='S'))\n pktflags = SYNACKpkt.getlayer(TCP).flags\n if pktflags == SYNACK:\n return True\n else:\n return False\n RSTpkt = IP(dst=target) / TCP(sport=startPort, endPort=port, flags='R')\n send(RSTpkt)\n\n\ntry:\n for port in range(int(startPort), int(endPort) + 1):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((targetIP, port))\n if result == 0:\n print('Port {}: [+] Open'.format(port))\n elif result != 0:\n print('Port {}: [-] Closed'.format(port))\n sock.close()\nexcept KeyboardInterrupt:\n sys.exit('You pressed Ctrl+C')\nexcept socket.gaierror:\n sys.exit('Hostname could not be resolved. Exiting')\nexcept socket.error:\n sys.exit(\"Couldn't connect to server\")\n<mask token>\nprint('-' * 60)\nprint('Scanning Completed in: ', total)\n",
"step-3": "<mask token>\ngetLogger('scapy.runtime').setLevel(ERROR)\n<mask token>\nsubprocess.call('clear', shell=True)\ntarget = input('Enter a remote host to scan: ')\ntargetIP = socket.gethostbyname(target)\nstartPort = int(input('Enter the start port to scan: '))\nendPort = int(input('Enter the end port to scan: '))\nports = range(int(startPort), int(endPort) + 1)\nt1 = datetime.now()\nSYNACK = 18\nRSTACK = 20\nprint('-' * 60)\nprint('Please wait, scanning remote host...', targetIP)\nlocaltime = time.asctime(time.localtime())\nprint('Scan started at: ', localtime)\n\n\ndef checkhost(ip):\n conf.verb = 0\n try:\n ping = sr1(IP(dst=ip) / ICMP())\n print('\\n[*] Target is up, beginning scan...')\n except Exception:\n print(\"\\n[!] Couldn't resolve target\")\n sys.exit('Exiting...')\n\n\nprint('-' * 60)\n\n\ndef scanport(port):\n startPort = RandShort()\n conf.verb = 0\n SYNACKpkt = sr1(IP(dst=target) / TCP(sport=startPort, endPort=port,\n flags='S'))\n pktflags = SYNACKpkt.getlayer(TCP).flags\n if pktflags == SYNACK:\n return True\n else:\n return False\n RSTpkt = IP(dst=target) / TCP(sport=startPort, endPort=port, flags='R')\n send(RSTpkt)\n\n\ntry:\n for port in range(int(startPort), int(endPort) + 1):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((targetIP, port))\n if result == 0:\n print('Port {}: [+] Open'.format(port))\n elif result != 0:\n print('Port {}: [-] Closed'.format(port))\n sock.close()\nexcept KeyboardInterrupt:\n sys.exit('You pressed Ctrl+C')\nexcept socket.gaierror:\n sys.exit('Hostname could not be resolved. Exiting')\nexcept socket.error:\n sys.exit(\"Couldn't connect to server\")\nt2 = datetime.now()\ntotal = t2 - t1\nprint('-' * 60)\nprint('Scanning Completed in: ', total)\n",
"step-4": "import threading\nimport socket\nimport subprocess\nimport sys\nimport time\nimport scapy\nfrom threading import Thread, Lock\nfrom queue import Queue\nfrom datetime import datetime\nfrom logging import getLogger, ERROR\ngetLogger('scapy.runtime').setLevel(ERROR)\nfrom scapy.all import *\nsubprocess.call('clear', shell=True)\ntarget = input('Enter a remote host to scan: ')\ntargetIP = socket.gethostbyname(target)\nstartPort = int(input('Enter the start port to scan: '))\nendPort = int(input('Enter the end port to scan: '))\nports = range(int(startPort), int(endPort) + 1)\nt1 = datetime.now()\nSYNACK = 18\nRSTACK = 20\nprint('-' * 60)\nprint('Please wait, scanning remote host...', targetIP)\nlocaltime = time.asctime(time.localtime())\nprint('Scan started at: ', localtime)\n\n\ndef checkhost(ip):\n conf.verb = 0\n try:\n ping = sr1(IP(dst=ip) / ICMP())\n print('\\n[*] Target is up, beginning scan...')\n except Exception:\n print(\"\\n[!] Couldn't resolve target\")\n sys.exit('Exiting...')\n\n\nprint('-' * 60)\n\n\ndef scanport(port):\n startPort = RandShort()\n conf.verb = 0\n SYNACKpkt = sr1(IP(dst=target) / TCP(sport=startPort, endPort=port,\n flags='S'))\n pktflags = SYNACKpkt.getlayer(TCP).flags\n if pktflags == SYNACK:\n return True\n else:\n return False\n RSTpkt = IP(dst=target) / TCP(sport=startPort, endPort=port, flags='R')\n send(RSTpkt)\n\n\ntry:\n for port in range(int(startPort), int(endPort) + 1):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((targetIP, port))\n if result == 0:\n print('Port {}: [+] Open'.format(port))\n elif result != 0:\n print('Port {}: [-] Closed'.format(port))\n sock.close()\nexcept KeyboardInterrupt:\n sys.exit('You pressed Ctrl+C')\nexcept socket.gaierror:\n sys.exit('Hostname could not be resolved. Exiting')\nexcept socket.error:\n sys.exit(\"Couldn't connect to server\")\nt2 = datetime.now()\ntotal = t2 - t1\nprint('-' * 60)\nprint('Scanning Completed in: ', total)\n",
"step-5": "# Original code from http://www.pythonforbeginners.com/code-snippets-source-code/port-scanner-in-python\n#!/usr/bin/env python\n\n# modules\nimport threading\nimport socket\nimport subprocess\nimport sys\nimport time\nimport scapy\n\nfrom threading import Thread, Lock\nfrom queue import Queue\nfrom datetime import datetime\nfrom logging import getLogger, ERROR\ngetLogger(\"scapy.runtime\") .setLevel (ERROR)\nfrom scapy.all import *\n\nsubprocess.call('clear', shell=True)\n\n# print_lock = threading.Lock() - WIP, threading not implemented yet.\n\n# Enter target host and port range\ntarget = input(\"Enter a remote host to scan: \")\ntargetIP = socket.gethostbyname(target)\nstartPort = int(input(\"Enter the start port to scan: \"))\nendPort = int(input(\"Enter the end port to scan: \"))\n\n# Setting some values\nports = range(int(startPort), int(endPort)+1)\nt1 = datetime.now()\nSYNACK = 0x12\nRSTACK = 0x14\n\n# Banner displaying which host is being scanned\nprint (\"-\" * 60)\nprint (\"Please wait, scanning remote host...\", targetIP)\nlocaltime = time.asctime(time.localtime())\nprint (\"Scan started at: \", localtime)\ndef checkhost(ip):\n conf.verb = 0\n try:\n ping = sr1(IP(dst = ip)/ICMP())\n print (\"\\n[*] Target is up, beginning scan...\") #this text isn't displayed - why?\n except Exception:\n print (\"\\n[!] Couldn't resolve target\")\n sys.exit(\"Exiting...\")\nprint (\"-\" * 60)\n\ndef scanport(port):\n startPort = RandShort() # scapy func that generates a small random nr to use as a source port.\n conf.verb = 0 # prevents output from sending pkts from being printed to the screen.\n SYNACKpkt = sr1(IP(dst = target)/TCP(sport = startPort, endPort = port, flags = \"S\")) # Scapy func sr1() used to craft & send a SYN pkt .\n pktflags = SYNACKpkt.getlayer(TCP).flags\n if pktflags == SYNACK:\n return True\n else:\n return False\n RSTpkt = IP(dst = target)/TCP(sport = startPort, endPort = port, flags = \"R\")\n send(RSTpkt)\n\n\n# Error handling\ntry:\n for port in range(int(startPort), int(endPort)+1):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((targetIP, port))\n if result == 0:\n print (\"Port {}: [+] Open\".format(port))\n elif result != 0:\n print (\"Port {}: [-] Closed\".format(port))\n sock.close()\n\nexcept KeyboardInterrupt:\n sys.exit(\"You pressed Ctrl+C\")\n\nexcept socket.gaierror:\n sys.exit(\"Hostname could not be resolved. Exiting\")\n\nexcept socket.error:\n sys.exit(\"Couldn't connect to server\")\n\nt2 = datetime.now()\n\n# Calculates the difference in time, to see how long it took to run the script\ntotal = t2 - t1\nprint (\"-\" * 60)\nprint (\"Scanning Completed in: \", total)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
for name in ["Madan", "Mohan", "Reddy", "Govindu"]:
print("My name includes "+name)
# Tables
# for i in range(1, 11):
# for j in range(1, 11):
# print("{0} * {1} = {2}".format(i,j, i*j))
# print("\n")
for i in range(1, 3):
for j in range(4, 7):
if j==5:
break
print(j)
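
# Expected output of the nested loop above: "4" is printed twice (once per
# outer iteration); the break fires at j == 5, so 5 and 6 are never printed.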
|
normal
|
{
"blob_id": "c0376d94b34ea43e562e68cd65d4e5d2c5b04fb3",
"index": 6657,
"step-1": "<mask token>\n",
"step-2": "for name in ['Madan', 'Mohan', 'Reddy', 'Govindu']:\n print('My name includes ' + name)\nfor i in range(1, 3):\n for j in range(4, 7):\n if j == 5:\n break\n print(j)\n",
"step-3": "for name in [\"Madan\", \"Mohan\", \"Reddy\", \"Govindu\"]:\n print(\"My name includes \"+name)\n\n# Tables\n# for i in range(1, 11):\n# for j in range(1, 11):\n# print(\"{0} * {1} = {2}\".format(i,j, i*j))\n# print(\"\\n\")\n\n\nfor i in range(1, 3):\n for j in range(4, 7):\n if j==5:\n break\n print(j)",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
import cv2
from pixcel import *
from scipy import ndimage
import math
from socket import *
from config import *
from time import time
def find_bounding_boxes(fimage, lables):
# initialize boxes array
boxes = []
for lable in lables:
# iterate all lables
# filter out image pixels with current lable
labled = (fimage == lable) + 0
# find indexes
box = find_bounding_box(labled)
# append found bouding box
boxes.append(box)
return boxes
def find_margined_bounding_boxes(fimage, lables, margins):
# initialize boxes array
boxes = []
for lable in lables:
# iterate all lables
# filter out image pixels with current lable
labled = (fimage == lable) + 0
# find indexes
box = find_bounding_box(labled, margins)
# append found bouding box
boxes.append(box)
return boxes
def find_bounding_box(binary_matrix, margins=(0, 0)):
# extract indexes of foreground pixels
indicies = np.array(np.nonzero(binary_matrix + 0))
# get contours
ys = margins[1] + np.amin(indicies[0])
ye = margins[1] + np.amax(indicies[0])
xs = margins[0] + np.amin(indicies[1])
xe = margins[0] + np.amax(indicies[1])
# return contours
return [(xs, ys), (xe, ye)]
def weightFilter(image, lables, weight):
max = 0
weights = np.zeros((lables))
fimage = np.zeros_like(image)
retained_lables = []
for i in range(lables):
weights[i] = np.sum(np.sum(image == i))
if weights[i] > weights[max]:
max = i
if weights[i] > weight:
fimage += np.uint8((image == i) + 0)
retained_lables.append(i)
fimage -= np.uint8((image == max) + 0)
fimage = np.uint8(fimage * 255)
boxes = []
if (len(retained_lables) > 0):
retained_lables.remove(max)
boxes = find_bounding_boxes(image.copy(), retained_lables)
return fimage, boxes
def weightFilterMini(image, weight):
image = np.uint8(image)
# extract contours
image, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
final_contours = []
for cnt in contours:
if cv2.contourArea(cnt) >= weight:
# add it to final_contours
final_contours.append(cnt)
fimage = np.zeros((image.shape[:2]), np.uint8)
cv2.drawContours(fimage, final_contours, -1, 255, -1)
boxes = RBox.toPointBoundingBoxes(RBox.fromClassicalBoundingBoxes([cv2.boundingRect(cnt) for cnt in final_contours]))
return fimage, boxes
def weightFilterMargined(image, lables, weight, margins):
max = 0
weights = np.zeros((lables))
fimage = np.zeros_like(image)
retained_lables = []
for i in range(lables):
weights[i] = np.sum(np.sum(image == i))
if weights[i] > weights[max]:
max = i
if weights[i] > weight:
fimage += np.uint8((image == i) + 0)
retained_lables.append(i)
fimage -= np.uint8(image == max)
fimage = np.uint8(fimage * 255)
boxes = []
if (len(retained_lables) > 0):
retained_lables.remove(max)
boxes = find_margined_bounding_boxes(image.copy(), retained_lables, margins)
return fimage, boxes
def calculatePossiblePadding(box, shape, default = 20):
w_pad = default
h_pad = default
# dynamic padding
if default == 0:
rbox = RBox.fromPointBoundingBox(box)
w_pad = round(0.205 * rbox.w)
h_pad = round(0.205 * rbox.h)
# extract with and height from shape
height, width = shape[0:2]
# extract starting, ending x and y from box
((x_start, y_start), (x_end, y_end)) = box
# check if is it possible to add certain padding
# if not add possible padding for all 4 points
pad_x_start = h_pad
if y_start - pad_x_start < 0:
pad_x_start = y_start
pad_y_start = w_pad
if x_start - pad_y_start < 0:
pad_y_start = x_start
pad_x_end = w_pad
if y_end + pad_x_end >= height:
pad_x_end = height - y_end - 1
pad_y_end = h_pad
if x_end + pad_y_end >= width:
pad_y_end = width - x_end - 1
# return resultant padding
return pad_x_start, pad_x_end, pad_y_start, pad_y_end
def findConnectedComponents(frame, threshold = 150, blur_radius = 1.0):
img = frame.copy() # gray-scale image
# smooth the image (to remove small objects)
imgf = ndimage.gaussian_filter(img, blur_radius)
# find connected components
labeled, nr_objects = ndimage.label(imgf > threshold)
return labeled, nr_objects
def drawBoundingBox(im, start, end, color):
cv2.rectangle(im, start, end, color, 1)
def pwpBasedTracking(image, frame_models, threshold):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
predicted = np.zeros((image.shape[0:2]), np.uint8)
# FOREACH GIVEN PATCH AND ITS MODEL, APPLY MODEL TO PATCH
for fm in frame_models:
patch = extractPatch(image, fm[1])
#patch = cv2.medianBlur(patch, 5)
mask = np.zeros(patch.shape[0:2], np.uint8)
res = applyModel(patch, mask, fm[0])
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)
res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)
        if len(np.nonzero(res)[0]) > max(fm[2] * threshold, 10):
            predicted[fm[1][0]: fm[1][1], fm[1][2]: fm[1][3]] += res
return predicted
def extractPatch(im, box):
# extract coordinates
x1, x2, y1, y2 = box
# extract and return patch
return im[x1: x2, y1: y2, :]
def randomColor():
return np.random.randint(0, 255, (1, 3))[0].tolist()
def performColorProcessing(image, mask, iterations = 1):
# initialize kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    for i in range(iterations):
        model = computePosteriors(image, np.uint8(mask > 0) + 0)
        mask = applyModel(image, mask, model)
        # keep the opened mask: morphologyEx returns a new array, it does not work in place
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)
return mask
def killDyingLables(frame, mask, threshold = 0.5):
    # get initial weights of labels (the small epsilon avoids division by zero below)
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)]) + 0.00001
# get final labled frame
labled_frame = frame * mask
# get final weights
final_weights = np.array([np.sum(labled_frame == lable) for lable in range(np.amax(frame) + 1)])
    # a label is dying if it kept less than the threshold fraction of its original pixels
final_probs = (final_weights/initial_weights) < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
        # check if the label is dying
        if dying:
            # kill the label by zeroing it out
labled_frame -= np.uint8((labled_frame == lable) * lable)
# return final labled frame
return labled_frame
def killSmallLables(frame, threshold = 150):
    # get initial weights of labels
initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)])
    # a label is too small if it has fewer than threshold pixels
final_probs = initial_weights < threshold
for lable in range(len(final_probs)):
dying = final_probs[lable]
        # check if the label is too small
        if dying:
            # kill the label by zeroing it out
frame -= np.uint8(np.uint8(frame == lable) * lable)
# return final labled frame
return frame
class RBox:
def __init__(self):
        # initialize attributes
self.x = 0
self.y = 0
self.w = 0
self.h = 0
@staticmethod
def fromClassicalBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[1]
rbox.w = box[2]
rbox.h = box[3]
# return rbox
return rbox
@staticmethod
def fromClassicalBoundingBoxes(boxes):
return [RBox.fromClassicalBoundingBox(box) for box in boxes]
@staticmethod
def fromRoughBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0]
rbox.y = box[2]
rbox.h = box[1] - box[0]
rbox.w = box[3] - box[2]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBox(box):
# initialize rbox
rbox = RBox()
# copy attributes
rbox.x = box[0][0]
rbox.y = box[0][1]
rbox.w = box[1][0] - box[0][0]
rbox.h = box[1][1] - box[0][1]
# return rbox
return rbox
@staticmethod
def fromPointBoundingBoxes(boxes):
return [RBox.fromPointBoundingBox(box) for box in boxes]
def classicalBoundingBox(self):
# return array like bounding box
return [self.x, self.y, self.w, self.h]
def pointBoundingBox(self):
# return tuple of end points
return ((self.x, self.y), (self.x + self.w, self.y + self.h))
def area(self):
return self.h * self.w
def __or__(self, other_box):
# initialize resultant box
rbox = RBox()
# calculate values
rbox.x = min(self.x, other_box.x)
rbox.y = min(self.y, other_box.y)
rbox.w = max(self.x + self.w, other_box.x + other_box.w) - rbox.x
rbox.h = max(self.y + self.h, other_box.y + other_box.h) - rbox.y
return rbox
def __and__(self, other_box):
# initialize resultant box
rbox = RBox()
# calculate values
rbox.x = max(self.x, other_box.x)
rbox.y = max(self.y, other_box.y)
rbox.w = min(self.x + self.w, other_box.x + other_box.w) - rbox.x
rbox.h = min(self.y + self.h, other_box.y + other_box.h) - rbox.y
if rbox.w < 0 or rbox.h < 0:
            # reinitialize to an empty box (no overlap)
rbox = RBox()
return rbox
def similarity(self, other_box):
        # classic IoU would be (A & B).area / (A | B).area, i.e. the commented-out line below;
        # here the intersection is normalized by the smaller box instead, so a box fully
        # contained inside another scores 1
        #return (self & other_box).area()/(self.area() + other_box.area() - (self & other_box).area())
        min_area = min(self.area(), other_box.area())
return (self & other_box).area()/min_area
def __str__(self):
return "{} {} {} {}".format(self.x, self.y, self.w, self.h)
def __mul__(self, other_box):
# calculate similarity and return
return self.similarity(other_box)
def __eq__(self, other):
return self.x == other.x and self.y == other.y and self.w == other.w and self.h == other.h
@staticmethod
def similarityStats(boxes):
        # column vector of RBox objects; the outer product below calls RBox.__mul__
        # pairwise, so each matrix entry is the similarity between two boxes
        sim_mat = np.array(boxes).reshape((-1, 1))
        # keep only the strict lower triangle: each pair counted once, no self-similarity
        sim_mat = np.tril(sim_mat.dot(sim_mat.T), -1)
# return similarity matrix
return sim_mat
@staticmethod
def similarityThreshold(boxes, threshold = 0.8):
# get similarity matrix
sim_mat = RBox.similarityStats(boxes)
# find thresholded indexes
ind = np.array(np.nonzero(sim_mat > threshold))
# return in the form of list
return list(ind.T)
@staticmethod
def reduceBoxes(boxes, threshold=0.8):
similar_boxes = RBox.similarityThreshold(boxes, threshold)
while len(similar_boxes) > 0:
union = boxes[similar_boxes[0][1]] | boxes[similar_boxes[0][0]]
            # remove the two merged boxes; the larger index comes first in the pair,
            # so the smaller index is still valid after the first deletion
            del boxes[similar_boxes[0][0]]
            del boxes[similar_boxes[0][1]]
boxes.append(union)
similar_boxes = RBox.similarityThreshold(boxes, threshold)
return boxes
@staticmethod
def toPointBoundingBoxes(boxes):
return [box.pointBoundingBox() for box in boxes]
@staticmethod
def toClassicBoundingBoxes(boxes):
return [box.classicalBoundingBox() for box in boxes]
def extractPatchFromImage(self, image, square=False):
# get bounding box end points
(start, end) = self.pointBoundingBox()
start, end = list(start), list(end)
# check if square flag is on
if square:
im_h, im_w = image.shape[0:2]
# adjust start and end so that height and width are equal
if self.h != self.w:
# find bigger size
if self.h > self.w:
# find difference
diff = self.h - self.w
if start[0] >= int(diff/2):
start[0] -= math.floor(diff/2)
diff -= math.floor(diff/2)
else:
diff -= start[0]
start[0] = 0
end[0] += diff
if end[0] >= im_w:
diff = end[0] - im_w + 1
end[1] -= diff
else:
# find difference
diff = self.w - self.h
if start[1] >= int(diff / 2):
start[1] -= math.floor(diff / 2)
diff -= math.floor(diff / 2)
else:
diff -= start[1]
start[1] = 0
end[1] += diff
if end[1] >= im_h:
diff = end[1] - im_h + 1
end[0] -= diff
# return patch
return image[start[1]: end[1], start[0]: end[0]]
def addPatchtoImage(self, image, patch):
# get bounding box end points
(start, end) = self.pointBoundingBox()
# patch in to image
image[start[1]: end[1], start[0]: end[0]] = patch
# return image
return image
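# A minimal sketch (not part of the original module) showing how the RBox helpers above
# are typically chained: wrap classical (x, y, w, h) boxes, merge the overlapping ones,
# and convert the result back to point form for drawing. The numeric boxes are made up.
def _rbox_usage_example():
    classical = [(10, 10, 50, 40), (15, 12, 50, 40), (200, 200, 30, 30)]
    boxes = RBox.fromClassicalBoundingBoxes(classical)
    # the first two boxes overlap almost completely, so they collapse into one union box
    merged = RBox.reduceBoxes(boxes, threshold=0.8)
    return RBox.toPointBoundingBoxes(merged)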
def askForLable(patch):
# write an image to send
cv2.imwrite("patch.jpg", patch)
# setup client socket
clientSock = socket(AF_INET, SOCK_STREAM)
clientSock.connect((TCP_IP, TCP_PORT))
# open image
image = open("patch.jpg", 'rb')
# read bytes equal to buffer size
data = image.read(BUFFER_SIZE)
# while image still has data
    while data:
# send data to server
clientSock.send(data)
# read more data if available
data = image.read(BUFFER_SIZE)
# close file
image.close()
# signal server to end data stream
clientSock.shutdown(SHUT_WR)
    # receive the label as binary data from the server and convert it to a string
label = clientSock.recv(1024)
label = label.decode("utf-8")
return label
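# A minimal end-to-end sketch (not part of the original module) of how the helpers above
# are meant to be combined: threshold a grayscale frame into connected components, keep
# only the heavy components, and draw their bounding boxes. The file name and the numeric
# thresholds are placeholder values.
def _segmentation_example(path="frame.jpg"):
    frame = cv2.imread(path)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    labeled, n_labels = findConnectedComponents(gray, threshold=150, blur_radius=1.0)
    mask, boxes = weightFilter(labeled, n_labels, weight=200)
    for (start, end) in boxes:
        # cast to plain ints, since find_bounding_box returns numpy integers
        drawBoundingBox(frame, (int(start[0]), int(start[1])), (int(end[0]), int(end[1])), randomColor())
    return frame, mask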
|
normal
|
{
"blob_id": "f3895f38be29fb07903237d8846cc9d657b39ea9",
"index": 6495,
"step-1": "<mask token>\n\n\nclass RBox:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.w = 0\n self.h = 0\n\n @staticmethod\n def fromClassicalBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0]\n rbox.y = box[1]\n rbox.w = box[2]\n rbox.h = box[3]\n return rbox\n\n @staticmethod\n def fromClassicalBoundingBoxes(boxes):\n return [RBox.fromClassicalBoundingBox(box) for box in boxes]\n\n @staticmethod\n def fromRoughBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0]\n rbox.y = box[2]\n rbox.h = box[1] - box[0]\n rbox.w = box[3] - box[2]\n return rbox\n\n @staticmethod\n def fromPointBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0][0]\n rbox.y = box[0][1]\n rbox.w = box[1][0] - box[0][0]\n rbox.h = box[1][1] - box[0][1]\n return rbox\n\n @staticmethod\n def fromPointBoundingBoxes(boxes):\n return [RBox.fromPointBoundingBox(box) for box in boxes]\n\n def classicalBoundingBox(self):\n return [self.x, self.y, self.w, self.h]\n\n def pointBoundingBox(self):\n return (self.x, self.y), (self.x + self.w, self.y + self.h)\n\n def area(self):\n return self.h * self.w\n\n def __or__(self, other_box):\n rbox = RBox()\n rbox.x = min(self.x, other_box.x)\n rbox.y = min(self.y, other_box.y)\n rbox.w = max(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = max(self.y + self.h, other_box.y + other_box.h) - rbox.y\n return rbox\n\n def __and__(self, other_box):\n rbox = RBox()\n rbox.x = max(self.x, other_box.x)\n rbox.y = max(self.y, other_box.y)\n rbox.w = min(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = min(self.y + self.h, other_box.y + other_box.h) - rbox.y\n if rbox.w < 0 or rbox.h < 0:\n rbox = RBox()\n return rbox\n\n def similarity(self, other_box):\n min_area = min(self.area(), other_box.area())\n return (self & other_box).area() / min_area\n\n def __str__(self):\n return '{} {} {} {}'.format(self.x, self.y, self.w, self.h)\n\n def __mul__(self, other_box):\n return self.similarity(other_box)\n\n def __eq__(self, other):\n return (self.x == other.x and self.y == other.y and self.w == other\n .w and self.h == other.h)\n\n @staticmethod\n def similarityStats(boxes):\n sim_mat = np.array(boxes).reshape((-1, 1))\n sim_mat = np.tril(sim_mat.dot(sim_mat.T), -1)\n return sim_mat\n\n @staticmethod\n def similarityThreshold(boxes, threshold=0.8):\n sim_mat = RBox.similarityStats(boxes)\n ind = np.array(np.nonzero(sim_mat > threshold))\n return list(ind.T)\n\n @staticmethod\n def reduceBoxes(boxes, threshold=0.8):\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n while len(similar_boxes) > 0:\n union = boxes[similar_boxes[0][1]] | boxes[similar_boxes[0][0]]\n del boxes[similar_boxes[0][0]]\n del boxes[similar_boxes[0][1]]\n boxes.append(union)\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n return boxes\n\n @staticmethod\n def toPointBoundingBoxes(boxes):\n return [box.pointBoundingBox() for box in boxes]\n\n @staticmethod\n def toClassicBoundingBoxes(boxes):\n return [box.classicalBoundingBox() for box in boxes]\n\n def extractPatchFromImage(self, image, square=False):\n start, end = self.pointBoundingBox()\n start, end = list(start), list(end)\n if square:\n im_h, im_w = image.shape[0:2]\n if self.h != self.w:\n if self.h > self.w:\n diff = self.h - self.w\n if start[0] >= int(diff / 2):\n start[0] -= math.floor(diff / 2)\n diff -= math.floor(diff / 2)\n else:\n diff -= start[0]\n start[0] = 0\n end[0] += diff\n if end[0] >= im_w:\n diff = end[0] - im_w + 1\n end[1] -= diff\n else:\n diff = self.w - self.h\n if start[1] >= int(diff / 
2):\n start[1] -= math.floor(diff / 2)\n diff -= math.floor(diff / 2)\n else:\n diff -= start[1]\n start[1] = 0\n end[1] += diff\n if end[1] >= im_h:\n diff = end[1] - im_h + 1\n end[0] -= diff\n return image[start[1]:end[1], start[0]:end[0]]\n\n def addPatchtoImage(self, image, patch):\n start, end = self.pointBoundingBox()\n image[start[1]:end[1], start[0]:end[0]] = patch\n return image\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_bounding_box(binary_matrix, margins=(0, 0)):\n indicies = np.array(np.nonzero(binary_matrix + 0))\n ys = margins[1] + np.amin(indicies[0])\n ye = margins[1] + np.amax(indicies[0])\n xs = margins[0] + np.amin(indicies[1])\n xe = margins[0] + np.amax(indicies[1])\n return [(xs, ys), (xe, ye)]\n\n\n<mask token>\n\n\ndef findConnectedComponents(frame, threshold=150, blur_radius=1.0):\n img = frame.copy()\n imgf = ndimage.gaussian_filter(img, blur_radius)\n labeled, nr_objects = ndimage.label(imgf > threshold)\n return labeled, nr_objects\n\n\n<mask token>\n\n\ndef performColorProcessing(image, mask, iterations=1):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n for i in range(iterations):\n model = computePosteriors(image, np.uint8(mask > 0) + 0)\n mask = applyModel(image, mask, model)\n cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)\n return mask\n\n\n<mask token>\n\n\nclass RBox:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.w = 0\n self.h = 0\n\n @staticmethod\n def fromClassicalBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0]\n rbox.y = box[1]\n rbox.w = box[2]\n rbox.h = box[3]\n return rbox\n\n @staticmethod\n def fromClassicalBoundingBoxes(boxes):\n return [RBox.fromClassicalBoundingBox(box) for box in boxes]\n\n @staticmethod\n def fromRoughBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0]\n rbox.y = box[2]\n rbox.h = box[1] - box[0]\n rbox.w = box[3] - box[2]\n return rbox\n\n @staticmethod\n def fromPointBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0][0]\n rbox.y = box[0][1]\n rbox.w = box[1][0] - box[0][0]\n rbox.h = box[1][1] - box[0][1]\n return rbox\n\n @staticmethod\n def fromPointBoundingBoxes(boxes):\n return [RBox.fromPointBoundingBox(box) for box in boxes]\n\n def classicalBoundingBox(self):\n return [self.x, self.y, self.w, self.h]\n\n def pointBoundingBox(self):\n return (self.x, self.y), (self.x + self.w, self.y + self.h)\n\n def area(self):\n return self.h * self.w\n\n def __or__(self, other_box):\n rbox = RBox()\n rbox.x = min(self.x, other_box.x)\n rbox.y = min(self.y, other_box.y)\n rbox.w = max(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = max(self.y + self.h, other_box.y + other_box.h) - rbox.y\n return rbox\n\n def __and__(self, other_box):\n rbox = RBox()\n rbox.x = max(self.x, other_box.x)\n rbox.y = max(self.y, other_box.y)\n rbox.w = min(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = min(self.y + self.h, other_box.y + other_box.h) - rbox.y\n if rbox.w < 0 or rbox.h < 0:\n rbox = RBox()\n return rbox\n\n def similarity(self, other_box):\n min_area = min(self.area(), other_box.area())\n return (self & other_box).area() / min_area\n\n def __str__(self):\n return '{} {} {} {}'.format(self.x, self.y, self.w, self.h)\n\n def __mul__(self, other_box):\n return self.similarity(other_box)\n\n def __eq__(self, other):\n return (self.x == other.x and self.y == other.y and self.w == other\n .w and self.h == other.h)\n\n @staticmethod\n def similarityStats(boxes):\n sim_mat = np.array(boxes).reshape((-1, 1))\n sim_mat = np.tril(sim_mat.dot(sim_mat.T), -1)\n return sim_mat\n\n @staticmethod\n def similarityThreshold(boxes, threshold=0.8):\n sim_mat = RBox.similarityStats(boxes)\n ind = np.array(np.nonzero(sim_mat > threshold))\n return list(ind.T)\n\n @staticmethod\n def reduceBoxes(boxes, threshold=0.8):\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n while len(similar_boxes) > 0:\n union = boxes[similar_boxes[0][1]] | 
boxes[similar_boxes[0][0]]\n del boxes[similar_boxes[0][0]]\n del boxes[similar_boxes[0][1]]\n boxes.append(union)\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n return boxes\n\n @staticmethod\n def toPointBoundingBoxes(boxes):\n return [box.pointBoundingBox() for box in boxes]\n\n @staticmethod\n def toClassicBoundingBoxes(boxes):\n return [box.classicalBoundingBox() for box in boxes]\n\n def extractPatchFromImage(self, image, square=False):\n start, end = self.pointBoundingBox()\n start, end = list(start), list(end)\n if square:\n im_h, im_w = image.shape[0:2]\n if self.h != self.w:\n if self.h > self.w:\n diff = self.h - self.w\n if start[0] >= int(diff / 2):\n start[0] -= math.floor(diff / 2)\n diff -= math.floor(diff / 2)\n else:\n diff -= start[0]\n start[0] = 0\n end[0] += diff\n if end[0] >= im_w:\n diff = end[0] - im_w + 1\n end[1] -= diff\n else:\n diff = self.w - self.h\n if start[1] >= int(diff / 2):\n start[1] -= math.floor(diff / 2)\n diff -= math.floor(diff / 2)\n else:\n diff -= start[1]\n start[1] = 0\n end[1] += diff\n if end[1] >= im_h:\n diff = end[1] - im_h + 1\n end[0] -= diff\n return image[start[1]:end[1], start[0]:end[0]]\n\n def addPatchtoImage(self, image, patch):\n start, end = self.pointBoundingBox()\n image[start[1]:end[1], start[0]:end[0]] = patch\n return image\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_margined_bounding_boxes(fimage, lables, margins):\n boxes = []\n for lable in lables:\n labled = (fimage == lable) + 0\n box = find_bounding_box(labled, margins)\n boxes.append(box)\n return boxes\n\n\ndef find_bounding_box(binary_matrix, margins=(0, 0)):\n indicies = np.array(np.nonzero(binary_matrix + 0))\n ys = margins[1] + np.amin(indicies[0])\n ye = margins[1] + np.amax(indicies[0])\n xs = margins[0] + np.amin(indicies[1])\n xe = margins[0] + np.amax(indicies[1])\n return [(xs, ys), (xe, ye)]\n\n\n<mask token>\n\n\ndef findConnectedComponents(frame, threshold=150, blur_radius=1.0):\n img = frame.copy()\n imgf = ndimage.gaussian_filter(img, blur_radius)\n labeled, nr_objects = ndimage.label(imgf > threshold)\n return labeled, nr_objects\n\n\n<mask token>\n\n\ndef performColorProcessing(image, mask, iterations=1):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n for i in range(iterations):\n model = computePosteriors(image, np.uint8(mask > 0) + 0)\n mask = applyModel(image, mask, model)\n cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)\n return mask\n\n\n<mask token>\n\n\ndef killSmallLables(frame, threshold=150):\n initial_weights = np.array([np.sum(frame == lable) for lable in range(\n np.amax(frame) + 1)])\n final_probs = initial_weights < threshold\n for lable in range(len(final_probs)):\n dying = final_probs[lable]\n if dying:\n frame -= np.uint8(np.uint8(frame == lable) * lable)\n return frame\n\n\nclass RBox:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.w = 0\n self.h = 0\n\n @staticmethod\n def fromClassicalBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0]\n rbox.y = box[1]\n rbox.w = box[2]\n rbox.h = box[3]\n return rbox\n\n @staticmethod\n def fromClassicalBoundingBoxes(boxes):\n return [RBox.fromClassicalBoundingBox(box) for box in boxes]\n\n @staticmethod\n def fromRoughBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0]\n rbox.y = box[2]\n rbox.h = box[1] - box[0]\n rbox.w = box[3] - box[2]\n return rbox\n\n @staticmethod\n def fromPointBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0][0]\n rbox.y = box[0][1]\n rbox.w = box[1][0] - box[0][0]\n rbox.h = box[1][1] - box[0][1]\n return rbox\n\n @staticmethod\n def fromPointBoundingBoxes(boxes):\n return [RBox.fromPointBoundingBox(box) for box in boxes]\n\n def classicalBoundingBox(self):\n return [self.x, self.y, self.w, self.h]\n\n def pointBoundingBox(self):\n return (self.x, self.y), (self.x + self.w, self.y + self.h)\n\n def area(self):\n return self.h * self.w\n\n def __or__(self, other_box):\n rbox = RBox()\n rbox.x = min(self.x, other_box.x)\n rbox.y = min(self.y, other_box.y)\n rbox.w = max(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = max(self.y + self.h, other_box.y + other_box.h) - rbox.y\n return rbox\n\n def __and__(self, other_box):\n rbox = RBox()\n rbox.x = max(self.x, other_box.x)\n rbox.y = max(self.y, other_box.y)\n rbox.w = min(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = min(self.y + self.h, other_box.y + other_box.h) - rbox.y\n if rbox.w < 0 or rbox.h < 0:\n rbox = RBox()\n return rbox\n\n def similarity(self, other_box):\n min_area = min(self.area(), other_box.area())\n return (self & other_box).area() / min_area\n\n def __str__(self):\n return '{} {} {} {}'.format(self.x, self.y, self.w, self.h)\n\n def __mul__(self, other_box):\n return self.similarity(other_box)\n\n def __eq__(self, other):\n return (self.x == other.x and self.y == other.y and self.w == other\n .w and self.h == 
other.h)\n\n @staticmethod\n def similarityStats(boxes):\n sim_mat = np.array(boxes).reshape((-1, 1))\n sim_mat = np.tril(sim_mat.dot(sim_mat.T), -1)\n return sim_mat\n\n @staticmethod\n def similarityThreshold(boxes, threshold=0.8):\n sim_mat = RBox.similarityStats(boxes)\n ind = np.array(np.nonzero(sim_mat > threshold))\n return list(ind.T)\n\n @staticmethod\n def reduceBoxes(boxes, threshold=0.8):\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n while len(similar_boxes) > 0:\n union = boxes[similar_boxes[0][1]] | boxes[similar_boxes[0][0]]\n del boxes[similar_boxes[0][0]]\n del boxes[similar_boxes[0][1]]\n boxes.append(union)\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n return boxes\n\n @staticmethod\n def toPointBoundingBoxes(boxes):\n return [box.pointBoundingBox() for box in boxes]\n\n @staticmethod\n def toClassicBoundingBoxes(boxes):\n return [box.classicalBoundingBox() for box in boxes]\n\n def extractPatchFromImage(self, image, square=False):\n start, end = self.pointBoundingBox()\n start, end = list(start), list(end)\n if square:\n im_h, im_w = image.shape[0:2]\n if self.h != self.w:\n if self.h > self.w:\n diff = self.h - self.w\n if start[0] >= int(diff / 2):\n start[0] -= math.floor(diff / 2)\n diff -= math.floor(diff / 2)\n else:\n diff -= start[0]\n start[0] = 0\n end[0] += diff\n if end[0] >= im_w:\n diff = end[0] - im_w + 1\n end[1] -= diff\n else:\n diff = self.w - self.h\n if start[1] >= int(diff / 2):\n start[1] -= math.floor(diff / 2)\n diff -= math.floor(diff / 2)\n else:\n diff -= start[1]\n start[1] = 0\n end[1] += diff\n if end[1] >= im_h:\n diff = end[1] - im_h + 1\n end[0] -= diff\n return image[start[1]:end[1], start[0]:end[0]]\n\n def addPatchtoImage(self, image, patch):\n start, end = self.pointBoundingBox()\n image[start[1]:end[1], start[0]:end[0]] = patch\n return image\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef find_bounding_boxes(fimage, lables):\n boxes = []\n for lable in lables:\n labled = (fimage == lable) + 0\n box = find_bounding_box(labled)\n boxes.append(box)\n return boxes\n\n\ndef find_margined_bounding_boxes(fimage, lables, margins):\n boxes = []\n for lable in lables:\n labled = (fimage == lable) + 0\n box = find_bounding_box(labled, margins)\n boxes.append(box)\n return boxes\n\n\ndef find_bounding_box(binary_matrix, margins=(0, 0)):\n indicies = np.array(np.nonzero(binary_matrix + 0))\n ys = margins[1] + np.amin(indicies[0])\n ye = margins[1] + np.amax(indicies[0])\n xs = margins[0] + np.amin(indicies[1])\n xe = margins[0] + np.amax(indicies[1])\n return [(xs, ys), (xe, ye)]\n\n\n<mask token>\n\n\ndef weightFilterMini(image, weight):\n image = np.uint8(image)\n image, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2\n .CHAIN_APPROX_SIMPLE)\n final_contours = []\n for cnt in contours:\n if cv2.contourArea(cnt) >= weight:\n final_contours.append(cnt)\n fimage = np.zeros(image.shape[:2], np.uint8)\n cv2.drawContours(fimage, final_contours, -1, 255, -1)\n boxes = RBox.toPointBoundingBoxes(RBox.fromClassicalBoundingBoxes([cv2.\n boundingRect(cnt) for cnt in final_contours]))\n return fimage, boxes\n\n\ndef weightFilterMargined(image, lables, weight, margins):\n max = 0\n weights = np.zeros(lables)\n fimage = np.zeros_like(image)\n retained_lables = []\n for i in range(lables):\n weights[i] = np.sum(np.sum(image == i))\n if weights[i] > weights[max]:\n max = i\n if weights[i] > weight:\n fimage += np.uint8((image == i) + 0)\n retained_lables.append(i)\n fimage -= np.uint8(image == max)\n fimage = np.uint8(fimage * 255)\n boxes = []\n if len(retained_lables) > 0:\n retained_lables.remove(max)\n boxes = find_margined_bounding_boxes(image.copy(), retained_lables,\n margins)\n return fimage, boxes\n\n\ndef calculatePossiblePadding(box, shape, default=20):\n w_pad = default\n h_pad = default\n if default == 0:\n rbox = RBox.fromPointBoundingBox(box)\n w_pad = round(0.205 * rbox.w)\n h_pad = round(0.205 * rbox.h)\n height, width = shape[0:2]\n (x_start, y_start), (x_end, y_end) = box\n pad_x_start = h_pad\n if y_start - pad_x_start < 0:\n pad_x_start = y_start\n pad_y_start = w_pad\n if x_start - pad_y_start < 0:\n pad_y_start = x_start\n pad_x_end = w_pad\n if y_end + pad_x_end >= height:\n pad_x_end = height - y_end - 1\n pad_y_end = h_pad\n if x_end + pad_y_end >= width:\n pad_y_end = width - x_end - 1\n return pad_x_start, pad_x_end, pad_y_start, pad_y_end\n\n\ndef findConnectedComponents(frame, threshold=150, blur_radius=1.0):\n img = frame.copy()\n imgf = ndimage.gaussian_filter(img, blur_radius)\n labeled, nr_objects = ndimage.label(imgf > threshold)\n return labeled, nr_objects\n\n\ndef drawBoundingBox(im, start, end, color):\n cv2.rectangle(im, start, end, color, 1)\n\n\n<mask token>\n\n\ndef extractPatch(im, box):\n x1, x2, y1, y2 = box\n return im[x1:x2, y1:y2, :]\n\n\ndef randomColor():\n return np.random.randint(0, 255, (1, 3))[0].tolist()\n\n\ndef performColorProcessing(image, mask, iterations=1):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n for i in range(iterations):\n model = computePosteriors(image, np.uint8(mask > 0) + 0)\n mask = applyModel(image, mask, model)\n cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)\n return mask\n\n\ndef killDyingLables(frame, mask, threshold=0.5):\n initial_weights = np.array([np.sum(frame == lable) for lable in range(\n np.amax(frame) + 1)]) + 1e-05\n labled_frame = 
frame * mask\n final_weights = np.array([np.sum(labled_frame == lable) for lable in\n range(np.amax(frame) + 1)])\n final_probs = final_weights / initial_weights < threshold\n for lable in range(len(final_probs)):\n dying = final_probs[lable]\n if dying:\n labled_frame -= np.uint8((labled_frame == lable) * lable)\n return labled_frame\n\n\ndef killSmallLables(frame, threshold=150):\n initial_weights = np.array([np.sum(frame == lable) for lable in range(\n np.amax(frame) + 1)])\n final_probs = initial_weights < threshold\n for lable in range(len(final_probs)):\n dying = final_probs[lable]\n if dying:\n frame -= np.uint8(np.uint8(frame == lable) * lable)\n return frame\n\n\nclass RBox:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.w = 0\n self.h = 0\n\n @staticmethod\n def fromClassicalBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0]\n rbox.y = box[1]\n rbox.w = box[2]\n rbox.h = box[3]\n return rbox\n\n @staticmethod\n def fromClassicalBoundingBoxes(boxes):\n return [RBox.fromClassicalBoundingBox(box) for box in boxes]\n\n @staticmethod\n def fromRoughBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0]\n rbox.y = box[2]\n rbox.h = box[1] - box[0]\n rbox.w = box[3] - box[2]\n return rbox\n\n @staticmethod\n def fromPointBoundingBox(box):\n rbox = RBox()\n rbox.x = box[0][0]\n rbox.y = box[0][1]\n rbox.w = box[1][0] - box[0][0]\n rbox.h = box[1][1] - box[0][1]\n return rbox\n\n @staticmethod\n def fromPointBoundingBoxes(boxes):\n return [RBox.fromPointBoundingBox(box) for box in boxes]\n\n def classicalBoundingBox(self):\n return [self.x, self.y, self.w, self.h]\n\n def pointBoundingBox(self):\n return (self.x, self.y), (self.x + self.w, self.y + self.h)\n\n def area(self):\n return self.h * self.w\n\n def __or__(self, other_box):\n rbox = RBox()\n rbox.x = min(self.x, other_box.x)\n rbox.y = min(self.y, other_box.y)\n rbox.w = max(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = max(self.y + self.h, other_box.y + other_box.h) - rbox.y\n return rbox\n\n def __and__(self, other_box):\n rbox = RBox()\n rbox.x = max(self.x, other_box.x)\n rbox.y = max(self.y, other_box.y)\n rbox.w = min(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = min(self.y + self.h, other_box.y + other_box.h) - rbox.y\n if rbox.w < 0 or rbox.h < 0:\n rbox = RBox()\n return rbox\n\n def similarity(self, other_box):\n min_area = min(self.area(), other_box.area())\n return (self & other_box).area() / min_area\n\n def __str__(self):\n return '{} {} {} {}'.format(self.x, self.y, self.w, self.h)\n\n def __mul__(self, other_box):\n return self.similarity(other_box)\n\n def __eq__(self, other):\n return (self.x == other.x and self.y == other.y and self.w == other\n .w and self.h == other.h)\n\n @staticmethod\n def similarityStats(boxes):\n sim_mat = np.array(boxes).reshape((-1, 1))\n sim_mat = np.tril(sim_mat.dot(sim_mat.T), -1)\n return sim_mat\n\n @staticmethod\n def similarityThreshold(boxes, threshold=0.8):\n sim_mat = RBox.similarityStats(boxes)\n ind = np.array(np.nonzero(sim_mat > threshold))\n return list(ind.T)\n\n @staticmethod\n def reduceBoxes(boxes, threshold=0.8):\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n while len(similar_boxes) > 0:\n union = boxes[similar_boxes[0][1]] | boxes[similar_boxes[0][0]]\n del boxes[similar_boxes[0][0]]\n del boxes[similar_boxes[0][1]]\n boxes.append(union)\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n return boxes\n\n @staticmethod\n def toPointBoundingBoxes(boxes):\n return [box.pointBoundingBox() for box 
in boxes]\n\n @staticmethod\n def toClassicBoundingBoxes(boxes):\n return [box.classicalBoundingBox() for box in boxes]\n\n def extractPatchFromImage(self, image, square=False):\n start, end = self.pointBoundingBox()\n start, end = list(start), list(end)\n if square:\n im_h, im_w = image.shape[0:2]\n if self.h != self.w:\n if self.h > self.w:\n diff = self.h - self.w\n if start[0] >= int(diff / 2):\n start[0] -= math.floor(diff / 2)\n diff -= math.floor(diff / 2)\n else:\n diff -= start[0]\n start[0] = 0\n end[0] += diff\n if end[0] >= im_w:\n diff = end[0] - im_w + 1\n end[1] -= diff\n else:\n diff = self.w - self.h\n if start[1] >= int(diff / 2):\n start[1] -= math.floor(diff / 2)\n diff -= math.floor(diff / 2)\n else:\n diff -= start[1]\n start[1] = 0\n end[1] += diff\n if end[1] >= im_h:\n diff = end[1] - im_h + 1\n end[0] -= diff\n return image[start[1]:end[1], start[0]:end[0]]\n\n def addPatchtoImage(self, image, patch):\n start, end = self.pointBoundingBox()\n image[start[1]:end[1], start[0]:end[0]] = patch\n return image\n\n\ndef askForLable(patch):\n cv2.imwrite('patch.jpg', patch)\n clientSock = socket(AF_INET, SOCK_STREAM)\n clientSock.connect((TCP_IP, TCP_PORT))\n image = open('patch.jpg', 'rb')\n data = image.read(BUFFER_SIZE)\n while data:\n clientSock.send(data)\n data = image.read(BUFFER_SIZE)\n image.close()\n clientSock.shutdown(SHUT_WR)\n label = clientSock.recv(1024)\n label = label.decode('utf-8')\n return label\n",
"step-5": "import numpy as np\nimport cv2\nfrom pixcel import *\nfrom scipy import ndimage\nimport math\nfrom socket import *\nfrom config import *\nfrom time import time\n\n\ndef find_bounding_boxes(fimage, lables):\n\n # initialize boxes array\n boxes = []\n\n for lable in lables:\n\n # iterate all lables\n\n # filter out image pixels with current lable\n labled = (fimage == lable) + 0\n\n # find indexes\n box = find_bounding_box(labled)\n\n # append found bouding box\n boxes.append(box)\n\n return boxes\n\ndef find_margined_bounding_boxes(fimage, lables, margins):\n\n # initialize boxes array\n boxes = []\n\n for lable in lables:\n\n # iterate all lables\n\n # filter out image pixels with current lable\n labled = (fimage == lable) + 0\n\n # find indexes\n box = find_bounding_box(labled, margins)\n\n # append found bouding box\n boxes.append(box)\n\n return boxes\n\ndef find_bounding_box(binary_matrix, margins=(0, 0)):\n\n # extract indexes of foreground pixels\n indicies = np.array(np.nonzero(binary_matrix + 0))\n\n # get contours\n ys = margins[1] + np.amin(indicies[0])\n ye = margins[1] + np.amax(indicies[0])\n\n xs = margins[0] + np.amin(indicies[1])\n xe = margins[0] + np.amax(indicies[1])\n\n # return contours\n return [(xs, ys), (xe, ye)]\n\ndef weightFilter(image, lables, weight):\n\n max = 0\n\n weights = np.zeros((lables))\n\n fimage = np.zeros_like(image)\n\n retained_lables = []\n\n for i in range(lables):\n weights[i] = np.sum(np.sum(image == i))\n\n if weights[i] > weights[max]:\n max = i\n\n if weights[i] > weight:\n fimage += np.uint8((image == i) + 0)\n retained_lables.append(i)\n\n fimage -= np.uint8((image == max) + 0)\n\n fimage = np.uint8(fimage * 255)\n\n boxes = []\n\n if (len(retained_lables) > 0):\n\n retained_lables.remove(max)\n boxes = find_bounding_boxes(image.copy(), retained_lables)\n\n return fimage, boxes\n\n\ndef weightFilterMini(image, weight):\n\n image = np.uint8(image)\n # extract contours\n image, contours, hierarchy = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n final_contours = []\n\n for cnt in contours:\n\n if cv2.contourArea(cnt) >= weight:\n\n # add it to final_contours\n final_contours.append(cnt)\n\n fimage = np.zeros((image.shape[:2]), np.uint8)\n cv2.drawContours(fimage, final_contours, -1, 255, -1)\n\n boxes = RBox.toPointBoundingBoxes(RBox.fromClassicalBoundingBoxes([cv2.boundingRect(cnt) for cnt in final_contours]))\n\n return fimage, boxes\n\ndef weightFilterMargined(image, lables, weight, margins):\n max = 0\n\n weights = np.zeros((lables))\n\n fimage = np.zeros_like(image)\n\n retained_lables = []\n\n for i in range(lables):\n\n weights[i] = np.sum(np.sum(image == i))\n\n if weights[i] > weights[max]:\n max = i\n\n if weights[i] > weight:\n fimage += np.uint8((image == i) + 0)\n retained_lables.append(i)\n\n fimage -= np.uint8(image == max)\n\n fimage = np.uint8(fimage * 255)\n\n boxes = []\n\n if (len(retained_lables) > 0):\n retained_lables.remove(max)\n boxes = find_margined_bounding_boxes(image.copy(), retained_lables, margins)\n\n return fimage, boxes\n\ndef calculatePossiblePadding(box, shape, default = 20):\n\n w_pad = default\n h_pad = default\n\n # dynamic padding\n if default == 0:\n\n rbox = RBox.fromPointBoundingBox(box)\n w_pad = round(0.205 * rbox.w)\n h_pad = round(0.205 * rbox.h)\n\n # extract with and height from shape\n height, width = shape[0:2]\n\n # extract starting, ending x and y from box\n ((x_start, y_start), (x_end, y_end)) = box\n\n # check if is it possible to add certain padding\n # 
if not add possible padding for all 4 points\n pad_x_start = h_pad\n if y_start - pad_x_start < 0:\n pad_x_start = y_start\n\n pad_y_start = w_pad\n if x_start - pad_y_start < 0:\n pad_y_start = x_start\n\n pad_x_end = w_pad\n if y_end + pad_x_end >= height:\n pad_x_end = height - y_end - 1\n\n pad_y_end = h_pad\n if x_end + pad_y_end >= width:\n pad_y_end = width - x_end - 1\n\n # return resultant padding\n return pad_x_start, pad_x_end, pad_y_start, pad_y_end\n\n\ndef findConnectedComponents(frame, threshold = 150, blur_radius = 1.0):\n\n img = frame.copy() # gray-scale image\n\n # smooth the image (to remove small objects)\n imgf = ndimage.gaussian_filter(img, blur_radius)\n\n # find connected components\n labeled, nr_objects = ndimage.label(imgf > threshold)\n\n return labeled, nr_objects\n\n\ndef drawBoundingBox(im, start, end, color):\n cv2.rectangle(im, start, end, color, 1)\n\ndef pwpBasedTracking(image, frame_models, threshold):\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))\n predicted = np.zeros((image.shape[0:2]), np.uint8)\n # FOREACH GIVEN PATCH AND ITS MODEL, APPLY MODEL TO PATCH\n for fm in frame_models:\n\n patch = extractPatch(image, fm[1])\n #patch = cv2.medianBlur(patch, 5)\n mask = np.zeros(patch.shape[0:2], np.uint8)\n res = applyModel(patch, mask, fm[0])\n res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)\n res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)\n res = cv2.morphologyEx(res, cv2.MORPH_OPEN, kernel)\n res = cv2.morphologyEx(res, cv2.MORPH_CLOSE, kernel)\n if(len(np.nonzero(res)[0]) > max(fm[2] * threshold, 10) ):\n predicted[fm[1][0]: fm[1][1], fm[1][2]: fm[1][3]] += res;\n\n return predicted\n\ndef extractPatch(im, box):\n\n # extract coordinates\n x1, x2, y1, y2 = box\n\n # extract and return patch\n return im[x1: x2, y1: y2, :]\n\ndef randomColor():\n\n return np.random.randint(0, 255, (1, 3))[0].tolist()\n\ndef performColorProcessing(image, mask, iterations = 1):\n\n # initialize kernel\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n\n for i in range(iterations):\n model = computePosteriors(image, np.uint8(mask > 0) + 0)\n mask = applyModel(image, mask, model)\n\n cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel=kernel)\n\n return mask\n\ndef killDyingLables(frame, mask, threshold = 0.5):\n\n # get initial weights of lables\n initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)]) + 0.00001\n\n # get final labled frame\n labled_frame = frame * mask\n\n # get final weights\n final_weights = np.array([np.sum(labled_frame == lable) for lable in range(np.amax(frame) + 1)])\n\n # final probabilites\n final_probs = (final_weights/initial_weights) < threshold\n\n for lable in range(len(final_probs)):\n\n dying = final_probs[lable]\n\n # check is lable is dying\n if dying:\n\n # kill lable\n labled_frame -= np.uint8((labled_frame == lable) * lable)\n\n # return final labled frame\n return labled_frame\n\ndef killSmallLables(frame, threshold = 150):\n\n # get initial weights of lables\n initial_weights = np.array([np.sum(frame == lable) for lable in range(np.amax(frame) + 1)])\n\n # final probabilites\n final_probs = initial_weights < threshold\n\n for lable in range(len(final_probs)):\n\n dying = final_probs[lable]\n\n # check is lable is dying\n if dying:\n\n # kill lable\n frame -= np.uint8(np.uint8(frame == lable) * lable)\n\n # return final labled frame\n return frame\n\nclass RBox:\n\n def __init__(self):\n\n # initialize atributes\n self.x = 0\n self.y = 0\n self.w = 0\n self.h = 
0\n\n @staticmethod\n def fromClassicalBoundingBox(box):\n\n # initialize rbox\n rbox = RBox()\n\n # copy attributes\n rbox.x = box[0]\n rbox.y = box[1]\n rbox.w = box[2]\n rbox.h = box[3]\n\n # return rbox\n return rbox\n\n @staticmethod\n def fromClassicalBoundingBoxes(boxes):\n\n return [RBox.fromClassicalBoundingBox(box) for box in boxes]\n\n @staticmethod\n def fromRoughBoundingBox(box):\n\n # initialize rbox\n rbox = RBox()\n\n # copy attributes\n rbox.x = box[0]\n rbox.y = box[2]\n rbox.h = box[1] - box[0]\n rbox.w = box[3] - box[2]\n\n # return rbox\n return rbox\n\n @staticmethod\n def fromPointBoundingBox(box):\n\n # initialize rbox\n rbox = RBox()\n\n # copy attributes\n rbox.x = box[0][0]\n rbox.y = box[0][1]\n rbox.w = box[1][0] - box[0][0]\n rbox.h = box[1][1] - box[0][1]\n\n # return rbox\n return rbox\n\n @staticmethod\n def fromPointBoundingBoxes(boxes):\n\n return [RBox.fromPointBoundingBox(box) for box in boxes]\n\n def classicalBoundingBox(self):\n\n # return array like bounding box\n return [self.x, self.y, self.w, self.h]\n\n def pointBoundingBox(self):\n\n # return tuple of end points\n return ((self.x, self.y), (self.x + self.w, self.y + self.h))\n\n def area(self):\n\n return self.h * self.w\n\n def __or__(self, other_box):\n\n # initialize resultant box\n rbox = RBox()\n\n # calculate values\n rbox.x = min(self.x, other_box.x)\n rbox.y = min(self.y, other_box.y)\n rbox.w = max(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = max(self.y + self.h, other_box.y + other_box.h) - rbox.y\n\n return rbox\n\n def __and__(self, other_box):\n\n # initialize resultant box\n rbox = RBox()\n\n # calculate values\n rbox.x = max(self.x, other_box.x)\n rbox.y = max(self.y, other_box.y)\n rbox.w = min(self.x + self.w, other_box.x + other_box.w) - rbox.x\n rbox.h = min(self.y + self.h, other_box.y + other_box.h) - rbox.y\n\n if rbox.w < 0 or rbox.h < 0:\n\n # reinitailize or make it zero\n rbox = RBox()\n\n return rbox\n\n def similarity(self, other_box):\n\n # (A & B)/(A | B) = (A & B).area/(A.area + B.area - (A & B).area)\n #return (self & other_box).area()/(self.area() + other_box.area() - (self & other_box).area())\n min_area = min(self.area(), other_box.area())\n return (self & other_box).area()/min_area\n\n def __str__(self):\n\n return \"{} {} {} {}\".format(self.x, self.y, self.w, self.h)\n\n def __mul__(self, other_box):\n\n # calculate similarity and return\n return self.similarity(other_box)\n\n def __eq__(self, other):\n\n return self.x == other.x and self.y == other.y and self.w == other.w and self.h == other.h\n\n @staticmethod\n def similarityStats(boxes):\n\n # create matrix out of boxes\n sim_mat = np.array(boxes).reshape((-1, 1))\n sim_mat = np.tril(sim_mat.dot(sim_mat.T), -1)\n\n # return similarity matrix\n return sim_mat\n\n @staticmethod\n def similarityThreshold(boxes, threshold = 0.8):\n\n # get similarity matrix\n sim_mat = RBox.similarityStats(boxes)\n\n # find thresholded indexes\n ind = np.array(np.nonzero(sim_mat > threshold))\n\n # return in the form of list\n return list(ind.T)\n\n @staticmethod\n def reduceBoxes(boxes, threshold=0.8):\n\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n\n while len(similar_boxes) > 0:\n\n union = boxes[similar_boxes[0][1]] | boxes[similar_boxes[0][0]]\n\n # remove similar boxes\n del boxes[similar_boxes[0][0]]\n del boxes[similar_boxes[0][1]]\n\n boxes.append(union)\n\n similar_boxes = RBox.similarityThreshold(boxes, threshold)\n\n return boxes\n\n @staticmethod\n def 
toPointBoundingBoxes(boxes):\n\n return [box.pointBoundingBox() for box in boxes]\n\n @staticmethod\n def toClassicBoundingBoxes(boxes):\n\n return [box.classicalBoundingBox() for box in boxes]\n\n def extractPatchFromImage(self, image, square=False):\n\n # get bounding box end points\n (start, end) = self.pointBoundingBox()\n start, end = list(start), list(end)\n\n # check if square flag is on\n if square:\n\n im_h, im_w = image.shape[0:2]\n\n # adjust start and end so that height and width are equal\n if self.h != self.w:\n\n # find bigger size\n if self.h > self.w:\n\n # find difference\n diff = self.h - self.w\n\n if start[0] >= int(diff/2):\n\n start[0] -= math.floor(diff/2)\n diff -= math.floor(diff/2)\n else:\n\n diff -= start[0]\n start[0] = 0\n\n end[0] += diff\n\n if end[0] >= im_w:\n\n diff = end[0] - im_w + 1\n end[1] -= diff\n else:\n\n # find difference\n diff = self.w - self.h\n\n if start[1] >= int(diff / 2):\n\n start[1] -= math.floor(diff / 2)\n diff -= math.floor(diff / 2)\n else:\n\n diff -= start[1]\n start[1] = 0\n\n end[1] += diff\n\n if end[1] >= im_h:\n diff = end[1] - im_h + 1\n end[0] -= diff\n\n # return patch\n return image[start[1]: end[1], start[0]: end[0]]\n\n def addPatchtoImage(self, image, patch):\n\n # get bounding box end points\n (start, end) = self.pointBoundingBox()\n\n # patch in to image\n image[start[1]: end[1], start[0]: end[0]] = patch\n\n # return image\n return image\n\ndef askForLable(patch):\n\n # write an image to send\n cv2.imwrite(\"patch.jpg\", patch)\n\n # setup client socket\n clientSock = socket(AF_INET, SOCK_STREAM)\n clientSock.connect((TCP_IP, TCP_PORT))\n\n # open image\n image = open(\"patch.jpg\", 'rb')\n\n # read bytes equal to buffer size\n data = image.read(BUFFER_SIZE)\n\n # while image still has data\n while (data):\n\n # send data to server\n clientSock.send(data)\n\n # read more data if available\n data = image.read(BUFFER_SIZE)\n\n # close file\n image.close()\n\n # signal server to end data stream\n clientSock.shutdown(SHUT_WR)\n\n # recieved lable as binary data from server and convert it to string\n label = clientSock.recv(1024)\n label = label.decode(\"utf-8\")\n\n return label\n",
"step-ids": [
23,
26,
28,
37,
41
]
}
|
[
23,
26,
28,
37,
41
] |
from math import sqrt
from Engine.regulators.PID import PID
from Engine.regulators.regulator_base_class import RegulatorBaseClass
from Engine.robot import Robot, MAX_LINEAR_ACCELERATION, MAX_ANGULAR_SPEED
from Util import Pose
from Util.geometry import clamp, normalize
from Util.pose import Position
from config.config import Config
config = Config()
class RealVelocityController(RegulatorBaseClass):
settings = {'kp': 10, 'ki': 0, 'kd': 1}
v_d = 4 # lower = bigger path correction
    emergency_break_constant = 0.4  # higher = stronger emergency braking (trajectory correction)
    emergency_break_safety_factor = 1  # lower = bigger braking distance
def __init__(self):
self.orientation_controller = PID(**self.settings, signed_error=True, deadzone=0.05)
self.dt = 0
self.last_commanded_velocity = Position()
def execute(self, robot: Robot, dt):
self.dt = dt
speed_norm = self.get_next_speed(robot)
path_correction = self.following_path_vector(robot)
velocity = robot.position_error * speed_norm / robot.position_error.norm + path_correction * speed_norm / self.v_d
velocity /= max(1.0, abs(velocity.norm) / speed_norm)
cmd_orientation = self.orientation_controller.execute(robot.orientation_error)
cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)
self.last_commanded_velocity = velocity
return Pose(velocity, cmd_orientation)
def following_path_vector(self, robot):
direction_error = self.last_commanded_velocity - robot.velocity.position
if direction_error.norm > 0:
return normalize(direction_error)
else:
return direction_error
def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):
        acceleration_offset = 1  # we want the robot to be more aggressive at the start of the trajectory
        emergency_break_offset = self.emergency_break_constant / self.dt * (robot.current_speed / 1000)  # we want the robot to brake as hard as it can when approaching the target too fast
emergency_break_offset = max(1.0, emergency_break_offset)
if robot.target_speed > robot.current_speed:
next_speed = robot.current_speed + acc * self.dt * acceleration_offset
else:
if self.is_distance_for_break(robot, acc, offset=1):
next_speed = robot.current_speed + acc * self.dt * acceleration_offset
else:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc
if robot.position_error.norm < (distance/self.emergency_break_safety_factor):
next_speed = robot.current_speed - acc * self.dt * emergency_break_offset
else:
next_speed = robot.current_speed - acc * self.dt
return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)
@staticmethod
def is_distance_for_break(robot, acc, offset=1) -> bool:
distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc
return robot.position_error.norm > (distance * offset)
def reset(self):
self.orientation_controller.reset()
class GrSimVelocityController(RealVelocityController):
settings = {'kp': 2, 'ki': 0.3, 'kd': 0}
v_d = 15
emergency_break_constant = 0
    emergency_break_safety_factor = 1  # lower = bigger braking distance
def is_time_to_break(robot, destination, cruise_speed, acceleration, target_speed):
    # physics formula: v_final ** 2 = v_init ** 2 - 2 * acceleration * displacement
    offset = 1.2  # small margin to start braking before the point, since there is some delay
dist_to_target = (destination - robot.pose.position).norm
return dist_to_target < (abs(cruise_speed ** 2 - target_speed**2) / (2 * acceleration)) * offset
def optimal_speed(robot, destination, cruise_speed, acceleration, target_speed):
    # physics formula: v_final ** 2 = v_init ** 2 - 2 * acceleration * displacement
dist_to_target = (destination - robot.pose.position).norm
return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target - target_speed**2)))
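# A small numeric sanity check (not part of the original module) of the braking-distance
# reasoning used above: from v_final**2 = v_init**2 - 2 * acceleration * d, the distance
# needed to go from v_init to v_final is d = |v_init**2 - v_final**2| / (2 * acceleration).
# With made-up values v_init = 2 m/s, v_final = 0 and acceleration = 4 m/s**2, d = 0.5 m,
# so braking has to start once the target is closer than roughly half a metre.
def _braking_distance(v_init, v_final, acceleration):
    return abs(v_init ** 2 - v_final ** 2) / (2 * acceleration)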
|
normal
|
{
"blob_id": "98bf0a332a6753e500b24bed2af16fe4a1cb9568",
"index": 1560,
"step-1": "<mask token>\n\n\nclass RealVelocityController(RegulatorBaseClass):\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4\n emergency_break_constant = 0.4\n emergency_break_safety_factor = 1\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=\n True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n path_correction = self.following_path_vector(robot)\n velocity = (robot.position_error * speed_norm / robot.\n position_error.norm + path_correction * speed_norm / self.v_d)\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.\n orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n self.last_commanded_velocity = velocity\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n direction_error = (self.last_commanded_velocity - robot.velocity.\n position)\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1\n emergency_break_offset = self.emergency_break_constant / self.dt * (\n robot.current_speed / 1000)\n emergency_break_offset = max(1.0, emergency_break_offset)\n if robot.target_speed > robot.current_speed:\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n elif self.is_distance_for_break(robot, acc, offset=1):\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.\n target_speed ** 2) / acc\n if (robot.position_error.norm < distance / self.\n emergency_break_safety_factor):\n next_speed = (robot.current_speed - acc * self.dt *\n emergency_break_offset)\n else:\n next_speed = robot.current_speed - acc * self.dt\n return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) ->bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2\n ) / acc\n return robot.position_error.norm > distance * offset\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RealVelocityController(RegulatorBaseClass):\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4\n emergency_break_constant = 0.4\n emergency_break_safety_factor = 1\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=\n True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n path_correction = self.following_path_vector(robot)\n velocity = (robot.position_error * speed_norm / robot.\n position_error.norm + path_correction * speed_norm / self.v_d)\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.\n orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n self.last_commanded_velocity = velocity\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n direction_error = (self.last_commanded_velocity - robot.velocity.\n position)\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1\n emergency_break_offset = self.emergency_break_constant / self.dt * (\n robot.current_speed / 1000)\n emergency_break_offset = max(1.0, emergency_break_offset)\n if robot.target_speed > robot.current_speed:\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n elif self.is_distance_for_break(robot, acc, offset=1):\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.\n target_speed ** 2) / acc\n if (robot.position_error.norm < distance / self.\n emergency_break_safety_factor):\n next_speed = (robot.current_speed - acc * self.dt *\n emergency_break_offset)\n else:\n next_speed = robot.current_speed - acc * self.dt\n return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) ->bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2\n ) / acc\n return robot.position_error.norm > distance * offset\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1\n\n\ndef is_time_to_break(robot, destination, cruise_speed, acceleration,\n target_speed):\n offset = 1.2\n dist_to_target = (destination - robot.pose.position).norm\n return dist_to_target < abs(cruise_speed ** 2 - target_speed ** 2) / (2 *\n acceleration) * offset\n\n\n<mask token>\n",
"step-3": "<mask token>\nconfig = Config()\n\n\nclass RealVelocityController(RegulatorBaseClass):\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4\n emergency_break_constant = 0.4\n emergency_break_safety_factor = 1\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=\n True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n path_correction = self.following_path_vector(robot)\n velocity = (robot.position_error * speed_norm / robot.\n position_error.norm + path_correction * speed_norm / self.v_d)\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.\n orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n self.last_commanded_velocity = velocity\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n direction_error = (self.last_commanded_velocity - robot.velocity.\n position)\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1\n emergency_break_offset = self.emergency_break_constant / self.dt * (\n robot.current_speed / 1000)\n emergency_break_offset = max(1.0, emergency_break_offset)\n if robot.target_speed > robot.current_speed:\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n elif self.is_distance_for_break(robot, acc, offset=1):\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.\n target_speed ** 2) / acc\n if (robot.position_error.norm < distance / self.\n emergency_break_safety_factor):\n next_speed = (robot.current_speed - acc * self.dt *\n emergency_break_offset)\n else:\n next_speed = robot.current_speed - acc * self.dt\n return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) ->bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2\n ) / acc\n return robot.position_error.norm > distance * offset\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1\n\n\ndef is_time_to_break(robot, destination, cruise_speed, acceleration,\n target_speed):\n offset = 1.2\n dist_to_target = (destination - robot.pose.position).norm\n return dist_to_target < abs(cruise_speed ** 2 - target_speed ** 2) / (2 *\n acceleration) * offset\n\n\ndef optimal_speed(robot, destination, cruise_speed, acceleration, target_speed\n ):\n dist_to_target = (destination - robot.pose.position).norm\n return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target - \n target_speed ** 2)))\n",
"step-4": "from math import sqrt\nfrom Engine.regulators.PID import PID\nfrom Engine.regulators.regulator_base_class import RegulatorBaseClass\nfrom Engine.robot import Robot, MAX_LINEAR_ACCELERATION, MAX_ANGULAR_SPEED\nfrom Util import Pose\nfrom Util.geometry import clamp, normalize\nfrom Util.pose import Position\nfrom config.config import Config\nconfig = Config()\n\n\nclass RealVelocityController(RegulatorBaseClass):\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4\n emergency_break_constant = 0.4\n emergency_break_safety_factor = 1\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=\n True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n path_correction = self.following_path_vector(robot)\n velocity = (robot.position_error * speed_norm / robot.\n position_error.norm + path_correction * speed_norm / self.v_d)\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.\n orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n self.last_commanded_velocity = velocity\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n direction_error = (self.last_commanded_velocity - robot.velocity.\n position)\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1\n emergency_break_offset = self.emergency_break_constant / self.dt * (\n robot.current_speed / 1000)\n emergency_break_offset = max(1.0, emergency_break_offset)\n if robot.target_speed > robot.current_speed:\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n elif self.is_distance_for_break(robot, acc, offset=1):\n next_speed = (robot.current_speed + acc * self.dt *\n acceleration_offset)\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.\n target_speed ** 2) / acc\n if (robot.position_error.norm < distance / self.\n emergency_break_safety_factor):\n next_speed = (robot.current_speed - acc * self.dt *\n emergency_break_offset)\n else:\n next_speed = robot.current_speed - acc * self.dt\n return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) ->bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2\n ) / acc\n return robot.position_error.norm > distance * offset\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1\n\n\ndef is_time_to_break(robot, destination, cruise_speed, acceleration,\n target_speed):\n offset = 1.2\n dist_to_target = (destination - robot.pose.position).norm\n return dist_to_target < abs(cruise_speed ** 2 - target_speed ** 2) / (2 *\n acceleration) * offset\n\n\ndef optimal_speed(robot, destination, cruise_speed, acceleration, target_speed\n ):\n dist_to_target = (destination - robot.pose.position).norm\n return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target - \n target_speed ** 2)))\n",
"step-5": "from math import sqrt\n\nfrom Engine.regulators.PID import PID\nfrom Engine.regulators.regulator_base_class import RegulatorBaseClass\nfrom Engine.robot import Robot, MAX_LINEAR_ACCELERATION, MAX_ANGULAR_SPEED\nfrom Util import Pose\nfrom Util.geometry import clamp, normalize\nfrom Util.pose import Position\nfrom config.config import Config\nconfig = Config()\n\n\nclass RealVelocityController(RegulatorBaseClass):\n\n settings = {'kp': 10, 'ki': 0, 'kd': 1}\n v_d = 4 # lower = bigger path correction\n emergency_break_constant = 0.4 # Higher = higher correction of trajectory\n emergency_break_safety_factor = 1 # lower = bigger break distance\n\n def __init__(self):\n self.orientation_controller = PID(**self.settings, signed_error=True, deadzone=0.05)\n self.dt = 0\n self.last_commanded_velocity = Position()\n\n def execute(self, robot: Robot, dt):\n self.dt = dt\n speed_norm = self.get_next_speed(robot)\n\n path_correction = self.following_path_vector(robot)\n\n velocity = robot.position_error * speed_norm / robot.position_error.norm + path_correction * speed_norm / self.v_d\n velocity /= max(1.0, abs(velocity.norm) / speed_norm)\n cmd_orientation = self.orientation_controller.execute(robot.orientation_error)\n cmd_orientation /= max(1.0, abs(cmd_orientation) / MAX_ANGULAR_SPEED)\n\n self.last_commanded_velocity = velocity\n\n return Pose(velocity, cmd_orientation)\n\n def following_path_vector(self, robot):\n\n direction_error = self.last_commanded_velocity - robot.velocity.position\n if direction_error.norm > 0:\n return normalize(direction_error)\n else:\n return direction_error\n\n def get_next_speed(self, robot, acc=MAX_LINEAR_ACCELERATION):\n acceleration_offset = 1 # on veut que le robot soit plus aggressif en début de trajet\n emergency_break_offset = self.emergency_break_constant / self.dt * (robot.current_speed / 1000) # on veut que le robot break le plus qu'il peut si on s'approche trop vite de la target\n emergency_break_offset = max(1.0, emergency_break_offset)\n\n if robot.target_speed > robot.current_speed:\n next_speed = robot.current_speed + acc * self.dt * acceleration_offset\n else:\n if self.is_distance_for_break(robot, acc, offset=1):\n next_speed = robot.current_speed + acc * self.dt * acceleration_offset\n else:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc\n if robot.position_error.norm < (distance/self.emergency_break_safety_factor):\n next_speed = robot.current_speed - acc * self.dt * emergency_break_offset\n else:\n next_speed = robot.current_speed - acc * self.dt\n\n return clamp(next_speed, -1 * robot.cruise_speed, robot.cruise_speed)\n\n @staticmethod\n def is_distance_for_break(robot, acc, offset=1) -> bool:\n distance = 0.5 * abs(robot.current_speed ** 2 - robot.target_speed ** 2) / acc\n return robot.position_error.norm > (distance * offset)\n\n def reset(self):\n self.orientation_controller.reset()\n\n\nclass GrSimVelocityController(RealVelocityController):\n\n settings = {'kp': 2, 'ki': 0.3, 'kd': 0}\n v_d = 15\n emergency_break_constant = 0\n emergency_break_safety_factor = 1 # lower = bigger break distance\n\n\ndef is_time_to_break(robot, destination, cruise_speed, acceleration, target_speed):\n # formule physique: v_finale ** 2 = v_init ** 2 - 2 * acceleration * distance_deplacement\n offset = 1.2 # petite marge pour break avant le point vue qu'il y a du délais\n dist_to_target = (destination - robot.pose.position).norm\n return dist_to_target < (abs(cruise_speed ** 2 - target_speed**2) / (2 * acceleration)) 
* offset\n\n\ndef optimal_speed(robot, destination, cruise_speed, acceleration, target_speed):\n # formule physique: v_finale ** 2 = v_init ** 2 - 2 * acceleration * distance_deplacement\n dist_to_target = (destination - robot.pose.position).norm\n\n return max(cruise_speed, sqrt(abs(2 * acceleration * dist_to_target - target_speed**2)))\n",
"step-ids": [
10,
11,
13,
14,
15
]
}
|
[
10,
11,
13,
14,
15
] |
#-*- coding: utf-8 -*-
import django
if django.get_version() <= '1.3.1':
import apps.settings as settings
from django.core.management import setup_environ
setup_environ(settings)
elif django.get_version() >= '1.7.0':
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "apps.settings")
django.setup()
elif django.get_version() >= '1.6.0': #ubuntu 14.04 used 1.6.?
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "apps.settings")
from django.conf import settings
import os
import os.path
import traceback
cur_dir = os.path.dirname(os.path.abspath(__file__))
LOGFILE = os.path.join(cur_dir,"logs","oneclick.log")
file_list = ['import_test', 'import_test_dev', 'import_test_local','settings', 'manage', 'settings_dev', 'manage_dev', 'settings_stg','manage_stg', 'settings_local','manage_local']
exclude_dir = ['.svn', 'realtime_pvp']
def run_dir(py_dir):
log_f = open(LOGFILE, 'a+')
try:
for root, dirs, files in os.walk(py_dir):
if os.path.basename(root) not in exclude_dir:
for f in files:
name, ext = os.path.splitext(f)
if ext == '.py' and name not in file_list:
root = root.replace(py_dir, '').replace('/', '.').replace('\\', '.')
print root, name
log_f.write(str(root) + str(name) + '\n')
if root:
__import__('apps.' + root, globals(), locals(), [name], -1)
else:
__import__('apps.' + name, globals(), locals(), [], -1)
log_f.close()
except:
err_info = traceback.format_exc()
print err_info
log_f.write(err_info+ '\n')
log_f.close()
if __name__ == '__main__':
run_dir(settings.BASE_ROOT+'/apps/')
|
normal
|
{
"blob_id": "8894b73829978cec29aab6ee8bf09700da7fb59f",
"index": 5659,
"step-1": "#-*- coding: utf-8 -*-\n\nimport django\n\nif django.get_version() <= '1.3.1':\n import apps.settings as settings\n from django.core.management import setup_environ\n setup_environ(settings)\nelif django.get_version() >= '1.7.0': \n import os\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"apps.settings\")\n django.setup()\nelif django.get_version() >= '1.6.0': #ubuntu 14.04 used 1.6.?\n import os\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"apps.settings\")\n from django.conf import settings\n\n\nimport os\nimport os.path\nimport traceback\n\ncur_dir = os.path.dirname(os.path.abspath(__file__))\nLOGFILE = os.path.join(cur_dir,\"logs\",\"oneclick.log\")\nfile_list = ['import_test', 'import_test_dev', 'import_test_local','settings', 'manage', 'settings_dev', 'manage_dev', 'settings_stg','manage_stg', 'settings_local','manage_local']\nexclude_dir = ['.svn', 'realtime_pvp']\n\ndef run_dir(py_dir):\n log_f = open(LOGFILE, 'a+')\n try:\n for root, dirs, files in os.walk(py_dir):\n if os.path.basename(root) not in exclude_dir:\n for f in files:\n name, ext = os.path.splitext(f)\n if ext == '.py' and name not in file_list:\n root = root.replace(py_dir, '').replace('/', '.').replace('\\\\', '.')\n print root, name\n log_f.write(str(root) + str(name) + '\\n')\n if root:\n __import__('apps.' + root, globals(), locals(), [name], -1)\n else:\n __import__('apps.' + name, globals(), locals(), [], -1)\n log_f.close()\n except:\n err_info = traceback.format_exc()\n print err_info\n log_f.write(err_info+ '\\n')\n log_f.close()\n\nif __name__ == '__main__':\n run_dir(settings.BASE_ROOT+'/apps/')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import requests
import datetime
import collections
import csv
import sys
import os
import os.path
History = collections.namedtuple('History', ['open', 'high', 'low', 'close', 'volume', 'adjustment'])
def history(symbol, since, until):
response = requests.get('http://ichart.finance.yahoo.com/table.csv?s=%s&d=%d&e=%d&f=%d&g=d&a=%d&b=%d&c=%d&ignore=.csv' % (
symbol,
until.month - 1,
until.day,
until.year,
since.month - 1,
since.day,
since.year,
))
for row in csv.reader(response.text.split('\n')[::-1][1:-1]):
yield History._make(map(float, row[1:]))
def last(symbol, start, number):
until = start - datetime.timedelta(days=1)
if until.weekday() == 6:
until -= datetime.timedelta(days=2)
elif until.weekday() == 0:
until -= datetime.timedelta(days=1)
since = until - datetime.timedelta(days=number - 1)
if since.weekday() in [0, 6]:
since -= datetime.timedelta(days=2)
return history(symbol, since, until)
def recent(symbol):
response = requests.get('http://download.finance.yahoo.com/d/quotes.csv?s=%s&f=d1ohgpvp&e=.csv' % symbol)
return History._make(map(float, csv.reader(response.text.split('\n', 1)).next()[1:]))
def qualify(symbol):
today = datetime.date.today()
data = dict(zip(['yy', 'y'], last(symbol, today, 2)))
try:
data['t'] = recent(symbol)
except ValueError:
return False
return data['yy'].close < data['y'].low and data['y'].close > data['t'].low
def process():
if len(sys.argv) > 1:
symbols = sys.argv[1:]
else:
symbols = []
for entry in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')):
symbols.append(entry.rsplit('.', 1)[0])
for symbol in symbols:
symbol = symbol.upper()
if symbol.strip() and qualify(symbol):
print symbol
if __name__ == '__main__':
process()
|
normal
|
{
"blob_id": "1cccb37a7195b1555513a32ef33b35b0edcd5eb1",
"index": 5363,
"step-1": "import requests\nimport datetime\nimport collections\nimport csv\nimport sys\nimport os\nimport os.path\n\n\nHistory = collections.namedtuple('History', ['open', 'high', 'low', 'close', 'volume', 'adjustment'])\n\ndef history(symbol, since, until):\n response = requests.get('http://ichart.finance.yahoo.com/table.csv?s=%s&d=%d&e=%d&f=%d&g=d&a=%d&b=%d&c=%d&ignore=.csv' % (\n symbol,\n until.month - 1,\n until.day,\n until.year,\n since.month - 1,\n since.day,\n since.year,\n ))\n for row in csv.reader(response.text.split('\\n')[::-1][1:-1]):\n yield History._make(map(float, row[1:]))\n\ndef last(symbol, start, number):\n until = start - datetime.timedelta(days=1)\n if until.weekday() == 6:\n until -= datetime.timedelta(days=2)\n elif until.weekday() == 0:\n until -= datetime.timedelta(days=1)\n since = until - datetime.timedelta(days=number - 1)\n if since.weekday() in [0, 6]:\n since -= datetime.timedelta(days=2)\n return history(symbol, since, until)\n \ndef recent(symbol):\n response = requests.get('http://download.finance.yahoo.com/d/quotes.csv?s=%s&f=d1ohgpvp&e=.csv' % symbol)\n return History._make(map(float, csv.reader(response.text.split('\\n', 1)).next()[1:]))\n\ndef qualify(symbol):\n today = datetime.date.today()\n data = dict(zip(['yy', 'y'], last(symbol, today, 2)))\n try:\n data['t'] = recent(symbol)\n except ValueError:\n return False\n return data['yy'].close < data['y'].low and data['y'].close > data['t'].low\n\ndef process():\n if len(sys.argv) > 1:\n symbols = sys.argv[1:]\n else:\n symbols = []\n for entry in os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')):\n symbols.append(entry.rsplit('.', 1)[0])\n for symbol in symbols:\n symbol = symbol.upper()\n if symbol.strip() and qualify(symbol):\n print symbol\n\nif __name__ == '__main__':\n process()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import sys
import time
import netifaces
import requests
_GET_ADDR_MAX_ITERATION = 50
_POST_CALLBACK_MAX_ITERATION =50
_RETRY_INTERVAL = 5
def _process_error(message):
sys.stderr.write(message)
sys.stderr.write('\n')
sys.exit(1)
def _parse_kernel_cmdline():
"""Parse linux kernel command line"""
with open('/proc/cmdline', 'rt') as f:
cmdline = f.read()
parameters = {}
for p in cmdline.split():
name, _, value = p.partition('=')
parameters[name] = value
return parameters
def _get_interface_ip(mac_addr):
""""Get IP address of interface by mac."""
interfaces = netifaces.interfaces()
for iface in interfaces:
addresses = netifaces.ifaddresses(iface)
link_addresses = addresses.get(netifaces.AF_LINK, [])
for link_addr in link_addresses:
if link_addr.get('addr') == mac_addr:
ip_addresses = addresses.get(netifaces.AF_INET)
if ip_addresses:
# NOTE: return first address, ironic API does not
# support multiple
return ip_addresses[0].get('addr')
else:
break
def main():
"""Script informs Ironic that bootstrap loading is done.
There are three mandatory parameters in kernel command line.
Ironic prepares these two:
'ironic_api_url' - URL of Ironic API service,
'deployment_id' - UUID of the node in Ironic.
Passed from PXE boot loader:
'BOOTIF' - MAC address of the boot interface.
"""
kernel_params = _parse_kernel_cmdline()
api_url = kernel_params.get('ironic_api_url')
deployment_id = kernel_params.get('deployment_id')
inspect = kernel_params.get('inspect')
# TODO(aarefiev): change ssh driver
ironic_driver = kernel_params.get('callback-driver-name', 'ansible_ssh')
if inspect and api_url is None:
_process_error('Ironic ansible callback: Mandatory parameter '
'"ironic_api_url" is missing.')
if api_url is None or deployment_id is None:
_process_error('Mandatory parameter ("ironic_api_url" or '
'"deployment_id") is missing.')
boot_mac = kernel_params.get('BOOTIF')
if boot_mac is None:
_process_error('Cannot define boot interface, "BOOTIF" parameter is '
'missing.')
# There is a difference in syntax in BOOTIF variable between pxe and ipxe
# boot with Ironic. For pxe boot the the leading `01-' denotes the device type
# (Ethernet) and is not a part of the MAC address
if boot_mac.startswith('01-'):
boot_mac = boot_mac[3:].replace('-', ':')
for n in range(_GET_ADDR_MAX_ITERATION):
boot_ip = _get_interface_ip(boot_mac)
if boot_ip is not None:
break
time.sleep(_RETRY_INTERVAL)
else:
_process_error('Cannot find IP address of boot interface.')
data = {"callback_url": "ssh://" + boot_ip}
if inspect:
passthru = ('%(api-url)s/v1/drivers/%(driver)s/vendor_passthru'
'/inspect' % {'api-url': api_url,
                                  'driver': ironic_driver})
else:
passthru = '%(api-url)s/v1/nodes/%(deployment_id)s/vendor_passthru' \
'/heartbeat' % {'api-url': api_url,
'deployment_id': deployment_id}
for attempt in range(_POST_CALLBACK_MAX_ITERATION):
try:
resp = requests.post(passthru, data=json.dumps(data),
headers={'Content-Type': 'application/json',
'Accept': 'application/json'})
except Exception as e:
error = str(e)
else:
if resp.status_code != 202:
error= ('Wrong status code %d returned from Ironic API' %
resp.status_code)
else:
break
if attempt == (_POST_CALLBACK_MAX_ITERATION - 1):
_process_error(error)
time.sleep(_RETRY_INTERVAL)
if __name__ == '__main__':
sys.exit(main())
|
normal
|
{
"blob_id": "8dab85622a29bc40f8ad6150f9e6f284853aeaf8",
"index": 4235,
"step-1": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport json\nimport sys\nimport time\n\nimport netifaces\nimport requests\n\n\n_GET_ADDR_MAX_ITERATION = 50\n_POST_CALLBACK_MAX_ITERATION =50\n_RETRY_INTERVAL = 5\n\n\ndef _process_error(message):\n sys.stderr.write(message)\n sys.stderr.write('\\n')\n sys.exit(1)\n\n\ndef _parse_kernel_cmdline():\n \"\"\"Parse linux kernel command line\"\"\"\n with open('/proc/cmdline', 'rt') as f:\n cmdline = f.read()\n parameters = {}\n for p in cmdline.split():\n name, _, value = p.partition('=')\n parameters[name] = value\n return parameters\n\ndef _get_interface_ip(mac_addr):\n \"\"\"\"Get IP address of interface by mac.\"\"\"\n interfaces = netifaces.interfaces()\n for iface in interfaces:\n addresses = netifaces.ifaddresses(iface)\n link_addresses = addresses.get(netifaces.AF_LINK, [])\n for link_addr in link_addresses:\n if link_addr.get('addr') == mac_addr:\n ip_addresses = addresses.get(netifaces.AF_INET)\n if ip_addresses:\n # NOTE: return first address, ironic API does not\n # support multiple\n return ip_addresses[0].get('addr')\n else:\n break\n\ndef main():\n \"\"\"Script informs Ironic that bootstrap loading is done.\n\n There are three mandatory parameters in kernel command line.\n Ironic prepares these two:\n 'ironic_api_url' - URL of Ironic API service,\n 'deployment_id' - UUID of the node in Ironic.\n Passed from PXE boot loader:\n 'BOOTIF' - MAC address of the boot interface.\n \"\"\"\n kernel_params = _parse_kernel_cmdline()\n api_url = kernel_params.get('ironic_api_url')\n deployment_id = kernel_params.get('deployment_id')\n inspect = kernel_params.get('inspect')\n # TODO(aarefiev): change ssh driver\n ironic_driver = kernel_params.get('callback-driver-name', 'ansible_ssh')\n if inspect and api_url is None:\n _process_error('Ironic ansible callback: Mandatory parameter '\n '\"ironic_api_url\" is missing.')\n if api_url is None or deployment_id is None:\n _process_error('Mandatory parameter (\"ironic_api_url\" or '\n '\"deployment_id\") is missing.')\n\n boot_mac = kernel_params.get('BOOTIF')\n if boot_mac is None:\n _process_error('Cannot define boot interface, \"BOOTIF\" parameter is '\n 'missing.')\n\n # There is a difference in syntax in BOOTIF variable between pxe and ipxe\n # boot with Ironic. 
For pxe boot the the leading `01-' denotes the device type\n # (Ethernet) and is not a part of the MAC address\n if boot_mac.startswith('01-'):\n boot_mac = boot_mac[3:].replace('-', ':')\n\n for n in range(_GET_ADDR_MAX_ITERATION):\n boot_ip = _get_interface_ip(boot_mac)\n if boot_ip is not None:\n break\n time.sleep(_RETRY_INTERVAL)\n else:\n _process_error('Cannot find IP address of boot interface.')\n\n data = {\"callback_url\": \"ssh://\" + boot_ip}\n\n if inspect:\n passthru = ('%(api-url)s/v1/drivers/%(driver)s/vendor_passthru'\n '/inspect' % {'api-url': api_url,\n 'driver': ironic_driver}\n else:\n passthru = '%(api-url)s/v1/nodes/%(deployment_id)s/vendor_passthru' \\\n '/heartbeat' % {'api-url': api_url,\n 'deployment_id': deployment_id}\n\n for attempt in range(_POST_CALLBACK_MAX_ITERATION):\n try:\n resp = requests.post(passthru, data=json.dumps(data),\n headers={'Content-Type': 'application/json',\n 'Accept': 'application/json'})\n except Exception as e:\n error = str(e)\n else:\n if resp.status_code != 202:\n error= ('Wrong status code %d returned from Ironic API' %\n resp.status_code)\n else:\n break\n\n if attempt == (_POST_CALLBACK_MAX_ITERATION - 1):\n _process_error(error)\n\n time.sleep(_RETRY_INTERVAL)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import Ploneboard
import PloneboardForum
import PloneboardConversation
import PloneboardComment
|
normal
|
{
"blob_id": "abdf5aee77ee879c50d0e605d5fd95e28a7ef7aa",
"index": 5631,
"step-1": "<mask token>\n",
"step-2": "import Ploneboard\nimport PloneboardForum\nimport PloneboardConversation\nimport PloneboardComment\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-20 08:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Child',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('age', models.IntegerField(choices=[(-1, 'not defined'), (0, '0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6')], default=-1)),
('sex', models.IntegerField(choices=[(1, 'dziewczynka'), (2, 'chłopiec')], default=1)),
('whose_child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('creation_date', models.DateTimeField(default=django.utils.timezone.now)),
('is_read', models.BooleanField(default=False)),
('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_receiver', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Parent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('children', models.ManyToManyField(to='placyk_app.Child')),
],
),
migrations.CreateModel(
name='Pground',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('place', models.CharField(max_length=128)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Quarter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('not defined', 0), ('Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined', max_length=64)),
],
),
migrations.CreateModel(
name='Visit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time_from', models.DateTimeField()),
('time_to', models.DateTimeField()),
('pground', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Pground')),
('who', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='pground',
name='quarter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),
),
migrations.AddField(
model_name='parent',
name='quarter',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),
),
migrations.AddField(
model_name='parent',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
normal
|
{
"blob_id": "e68d872232b3eab4c33cbbe4376be7dd788888e2",
"index": 1242,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Child', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 128)), ('age', models.IntegerField(choices=[(-1, 'not defined'), (0,\n '0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'\n )], default=-1)), ('sex', models.IntegerField(choices=[(1,\n 'dziewczynka'), (2, 'chłopiec')], default=1)), ('whose_child',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.CreateModel(name='Message',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('content', models.TextField(\n )), ('creation_date', models.DateTimeField(default=django.utils.\n timezone.now)), ('is_read', models.BooleanField(default=False)), (\n 'receiver', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='message_receiver', to=settings.\n AUTH_USER_MODEL)), ('sender', models.ForeignKey(on_delete=django.db\n .models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),\n migrations.CreateModel(name='Parent', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('children', models.ManyToManyField(to=\n 'placyk_app.Child'))]), migrations.CreateModel(name='Pground',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('place', models.CharField(\n max_length=128)), ('description', models.TextField())]), migrations\n .CreateModel(name='Quarter', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('name', models.CharField(choices=[('not defined', 0), (\n 'Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined',\n max_length=64))]), migrations.CreateModel(name='Visit', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('time_from', models.\n DateTimeField()), ('time_to', models.DateTimeField()), ('pground',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'placyk_app.Pground')), ('who', models.ForeignKey(on_delete=django.\n db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),\n migrations.AddField(model_name='pground', name='quarter', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'placyk_app.Quarter')), migrations.AddField(model_name='parent',\n name='quarter', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='placyk_app.Quarter')), migrations.AddField(\n model_name='parent', name='user', field=models.OneToOneField(\n on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Child', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=\n 128)), ('age', models.IntegerField(choices=[(-1, 'not defined'), (0,\n '0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'\n )], default=-1)), ('sex', models.IntegerField(choices=[(1,\n 'dziewczynka'), (2, 'chłopiec')], default=1)), ('whose_child',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n settings.AUTH_USER_MODEL))]), migrations.CreateModel(name='Message',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('content', models.TextField(\n )), ('creation_date', models.DateTimeField(default=django.utils.\n timezone.now)), ('is_read', models.BooleanField(default=False)), (\n 'receiver', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, related_name='message_receiver', to=settings.\n AUTH_USER_MODEL)), ('sender', models.ForeignKey(on_delete=django.db\n .models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),\n migrations.CreateModel(name='Parent', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('children', models.ManyToManyField(to=\n 'placyk_app.Child'))]), migrations.CreateModel(name='Pground',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('place', models.CharField(\n max_length=128)), ('description', models.TextField())]), migrations\n .CreateModel(name='Quarter', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('name', models.CharField(choices=[('not defined', 0), (\n 'Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined',\n max_length=64))]), migrations.CreateModel(name='Visit', fields=[(\n 'id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('time_from', models.\n DateTimeField()), ('time_to', models.DateTimeField()), ('pground',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'placyk_app.Pground')), ('who', models.ForeignKey(on_delete=django.\n db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL))]),\n migrations.AddField(model_name='pground', name='quarter', field=\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'placyk_app.Quarter')), migrations.AddField(model_name='parent',\n name='quarter', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='placyk_app.Quarter')), migrations.AddField(\n model_name='parent', name='user', field=models.OneToOneField(\n on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.2 on 2017-07-20 08:05\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Child',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=128)),\n ('age', models.IntegerField(choices=[(-1, 'not defined'), (0, '0 - 1'), (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6')], default=-1)),\n ('sex', models.IntegerField(choices=[(1, 'dziewczynka'), (2, 'chłopiec')], default=1)),\n ('whose_child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Message',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('content', models.TextField()),\n ('creation_date', models.DateTimeField(default=django.utils.timezone.now)),\n ('is_read', models.BooleanField(default=False)),\n ('receiver', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_receiver', to=settings.AUTH_USER_MODEL)),\n ('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Parent',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('children', models.ManyToManyField(to='placyk_app.Child')),\n ],\n ),\n migrations.CreateModel(\n name='Pground',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('place', models.CharField(max_length=128)),\n ('description', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Quarter',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(choices=[('not defined', 0), ('Bronowice Małe', 1), ('Krowodrza', 2)], default='not defined', max_length=64)),\n ],\n ),\n migrations.CreateModel(\n name='Visit',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('time_from', models.DateTimeField()),\n ('time_to', models.DateTimeField()),\n ('pground', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Pground')),\n ('who', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='pground',\n name='quarter',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),\n ),\n migrations.AddField(\n model_name='parent',\n name='quarter',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='placyk_app.Quarter'),\n ),\n migrations.AddField(\n model_name='parent',\n name='user',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import time
import pymorphy2
import pyglet
import pyttsx3
import threading
import warnings
import pytils
warnings.filterwarnings("ignore")
""" Количество раундов, вдохов в раунде, задержка дыхания на вдохе"""
rounds, breaths, hold = 4, 30, 13
def play_wav(src):
wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
wav.play()
time.sleep(wav.duration)
def play_wav_inline(src):
wav = pyglet.media.load(sys.path[0] + '\\src\\wav\\' + src + '.wav')
wav.play()
def correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):
new_phrase = []
py_gen = 1
phrase = phrase.split(' ')
while phrase:
word = phrase.pop(-1)
if 'NUMB' in morph.parse(word)[0].tag:
new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))
else:
new_phrase.append(word)
py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0].tag else pytils.numeral.MALE
return ' '.join(new_phrase[::-1])
def nums(phrase, morph=pymorphy2.MorphAnalyzer()):
""" согласование существительных с числительными, стоящими перед ними """
phrase = phrase.replace(' ', ' ').replace(',', ' ,')
numeral = ''
new_phrase = []
for word in phrase.split(' '):
if 'NUMB' in morph.parse(word)[0].tag:
numeral = word
if numeral:
word = str(morph.parse(word)[0].make_agree_with_number(abs(int(numeral))).word)
new_phrase.append(word)
return ' '.join(new_phrase).replace(' ,', ',')
def speak(what):
speech_voice = 3 # голосовой движок
rate = 120
tts = pyttsx3.init()
voices = tts.getProperty("voices")
tts.setProperty('rate', rate)
tts.setProperty("voice", voices[speech_voice].id)
print('🔊', what)
what = correct_numerals(what)
tts.say(what)
tts.runAndWait()
# tts.stop()
class Workout:
def __init__(self, rounds=3, breaths=30, hold=15):
self.rounds = rounds
self.breaths = breaths
self.hold = hold
self.round_times = []
self.lock = threading.Lock() # взаимоблокировка отдельных голосовых потоков
def __str__(self):
return '\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)
def __hold_breath(self):
start_time = time.time()
input()
seconds = int(time.time() - start_time)
mins = seconds // 60
secs = seconds % 60
self.round_times.append('{:02}:{:02}'.format(mins, secs))
play_wav_inline('inhale')
self.say('Глубокий вдох. ' + nums("{} минута {} секунда".format(mins, secs)))
def __clock_tick(self):
for i in range(self.hold):
if i < hold - 3:
time.sleep(1)
else:
play_wav('clock')
play_wav_inline('gong2')
def __breathe_round(self, round):
self.say('Раунд ' + str(round))
for i in range(self.breaths):
if i % 10 == 0:
play_wav_inline('gong')
play_wav('inhale')
print(i + 1, end=' ')
play_wav('exhale')
print()
self.say('Задерживаем дыхание на выдохе')
self.__hold_breath()
# self.say('Держим ' + nums(str(self.hold) + ' секунда'))
self.__clock_tick()
play_wav_inline('exhale')
self.say('Выдох')
time.sleep(1)
def breathe(self):
self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))
self.say('Каждый раунд это ' + nums(str(self.breaths) + ' глубокий вдох - и спокойный выдох'))
self.say('Приготовились...')
for i in range(self.rounds):
self.__breathe_round(i + 1)
self.say('Восстанавливаем дыхание.')
def statistics(self):
print('=============')
for i in range(len(self.round_times)):
print('Раунд', i, self.round_times[i])
print('=============')
def say(self, what):
self.lock.acquire()
thread = threading.Thread(target=speak, kwargs={'what': what})
thread.start()
thread.join()
self.lock.release()
workout = Workout(rounds, breaths, hold)
workout.breathe()
workout.statistics()
|
normal
|
{
"blob_id": "a98be930058269a6adbc9a28d1c0ad5d9abba136",
"index": 35,
"step-1": "<mask token>\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\n<mask token>\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\n<mask token>\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\n<mask token>\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\ndef play_wav_inline(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n\n\n<mask token>\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\ndef speak(what):\n speech_voice = 3\n rate = 120\n tts = pyttsx3.init()\n voices = tts.getProperty('voices')\n tts.setProperty('rate', rate)\n tts.setProperty('voice', voices[speech_voice].id)\n print('🔊', what)\n what = correct_numerals(what)\n tts.say(what)\n tts.runAndWait()\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. ' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\n<mask token>\n",
"step-4": "import sys\nimport time\nimport pymorphy2\nimport pyglet\nimport pyttsx3\nimport threading\nimport warnings\nimport pytils\nwarnings.filterwarnings('ignore')\n<mask token>\nrounds, breaths, hold = 4, 30, 13\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\ndef play_wav_inline(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0\n ].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(\n numeral))).word)\n new_phrase.append(word)\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\ndef speak(what):\n speech_voice = 3\n rate = 120\n tts = pyttsx3.init()\n voices = tts.getProperty('voices')\n tts.setProperty('rate', rate)\n tts.setProperty('voice', voices[speech_voice].id)\n print('🔊', what)\n what = correct_numerals(what)\n tts.say(what)\n tts.runAndWait()\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock()\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums('{} минута {} секунда'.format(\n mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) +\n ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\nworkout = Workout(rounds, breaths, hold)\nworkout.breathe()\nworkout.statistics()\n",
"step-5": "import sys\nimport time\nimport pymorphy2\nimport pyglet\nimport pyttsx3\nimport threading\nimport warnings\nimport pytils\n\nwarnings.filterwarnings(\"ignore\")\n\n\"\"\" Количество раундов, вдохов в раунде, задержка дыхания на вдохе\"\"\"\nrounds, breaths, hold = 4, 30, 13\n\n\ndef play_wav(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n time.sleep(wav.duration)\n\n\ndef play_wav_inline(src):\n wav = pyglet.media.load(sys.path[0] + '\\\\src\\\\wav\\\\' + src + '.wav')\n wav.play()\n\n\ndef correct_numerals(phrase, morph=pymorphy2.MorphAnalyzer()):\n new_phrase = []\n py_gen = 1\n phrase = phrase.split(' ')\n while phrase:\n word = phrase.pop(-1)\n if 'NUMB' in morph.parse(word)[0].tag:\n new_phrase.append(pytils.numeral.sum_string(int(word), py_gen))\n else:\n new_phrase.append(word)\n py_gen = pytils.numeral.FEMALE if 'femn' in morph.parse(word)[0].tag else pytils.numeral.MALE\n return ' '.join(new_phrase[::-1])\n\n\ndef nums(phrase, morph=pymorphy2.MorphAnalyzer()):\n \"\"\" согласование существительных с числительными, стоящими перед ними \"\"\"\n phrase = phrase.replace(' ', ' ').replace(',', ' ,')\n numeral = ''\n new_phrase = []\n for word in phrase.split(' '):\n if 'NUMB' in morph.parse(word)[0].tag:\n numeral = word\n if numeral:\n word = str(morph.parse(word)[0].make_agree_with_number(abs(int(numeral))).word)\n new_phrase.append(word)\n\n return ' '.join(new_phrase).replace(' ,', ',')\n\n\ndef speak(what):\n speech_voice = 3 # голосовой движок\n rate = 120\n tts = pyttsx3.init()\n voices = tts.getProperty(\"voices\")\n tts.setProperty('rate', rate)\n tts.setProperty(\"voice\", voices[speech_voice].id)\n print('🔊', what)\n what = correct_numerals(what)\n tts.say(what)\n tts.runAndWait()\n # tts.stop()\n\n\nclass Workout:\n\n def __init__(self, rounds=3, breaths=30, hold=15):\n self.rounds = rounds\n self.breaths = breaths\n self.hold = hold\n self.round_times = []\n self.lock = threading.Lock() # взаимоблокировка отдельных голосовых потоков\n\n def __str__(self):\n return '\\n♻{} 🗣{} ⏱{}'.format(self.rounds, self.breaths, self.hold)\n\n def __hold_breath(self):\n start_time = time.time()\n input()\n seconds = int(time.time() - start_time)\n mins = seconds // 60\n secs = seconds % 60\n self.round_times.append('{:02}:{:02}'.format(mins, secs))\n play_wav_inline('inhale')\n self.say('Глубокий вдох. 
' + nums(\"{} минута {} секунда\".format(mins, secs)))\n\n def __clock_tick(self):\n for i in range(self.hold):\n if i < hold - 3:\n time.sleep(1)\n else:\n play_wav('clock')\n play_wav_inline('gong2')\n\n def __breathe_round(self, round):\n self.say('Раунд ' + str(round))\n for i in range(self.breaths):\n if i % 10 == 0:\n play_wav_inline('gong')\n play_wav('inhale')\n print(i + 1, end=' ')\n play_wav('exhale')\n print()\n self.say('Задерживаем дыхание на выдохе')\n self.__hold_breath()\n # self.say('Держим ' + nums(str(self.hold) + ' секунда'))\n self.__clock_tick()\n play_wav_inline('exhale')\n self.say('Выдох')\n time.sleep(1)\n\n def breathe(self):\n self.say('Выполняем ' + nums(str(self.rounds) + ' раунд'))\n self.say('Каждый раунд это ' + nums(str(self.breaths) + ' глубокий вдох - и спокойный выдох'))\n self.say('Приготовились...')\n for i in range(self.rounds):\n self.__breathe_round(i + 1)\n self.say('Восстанавливаем дыхание.')\n\n def statistics(self):\n print('=============')\n for i in range(len(self.round_times)):\n print('Раунд', i, self.round_times[i])\n print('=============')\n\n def say(self, what):\n self.lock.acquire()\n thread = threading.Thread(target=speak, kwargs={'what': what})\n thread.start()\n thread.join()\n self.lock.release()\n\n\nworkout = Workout(rounds, breaths, hold)\nworkout.breathe()\n\nworkout.statistics()\n",
"step-ids": [
10,
11,
13,
17,
18
]
}
|
[
10,
11,
13,
17,
18
] |
from __future__ import division
from pyoperators import pcg
from pysimulators import profile
from qubic import (
create_random_pointings, equ2gal, QubicAcquisition, PlanckAcquisition,
QubicPlanckAcquisition, QubicInstrument)
from qubic.data import PATH
from qubic.io import read_map
import healpy as hp
import matplotlib.pyplot as mp
import numpy as np
def statstr(vec):
m=np.mean(vec)
s=np.std(vec)
return '{0:.4f} +/- {1:.4f}'.format(m,s)
def plotinst(inst,shift=0.12):
for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):
if quad < 4:
plot(xyc[0],xyc[1],'ro')
else:
plot(xyc[0]+shift,xyc[1],'bo')
xlim(-0.06, 0.18)
def display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):
out = []
for i, (kind, lim) in enumerate(zip('IQU', Trange)):
map = input[..., i]
out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,
max=lim, title=msg + ' ' + kind,
sub=(3, 3, iplot + i), return_projected_map=True)]
return out
def profile(x,y,range=None,nbins=10,fmt=None,plot=True, dispersion=True, color=None):
if range == None:
mini = np.min(x)
maxi = np.max(x)
else:
mini = range[0]
maxi = range[1]
dx = (maxi - mini) / nbins
xmin = np.linspace(mini,maxi-dx,nbins)
xmax = xmin + dx
xc = xmin + dx / 2
yval = np.zeros(nbins)
dy = np.zeros(nbins)
dx = np.zeros(nbins) + dx / 2
for i in np.arange(nbins):
ok = (x > xmin[i]) & (x < xmax[i])
yval[i] = np.mean(y[ok])
if dispersion:
fact = 1
else:
fact = np.sqrt(len(y[ok]))
dy[i] = np.std(y[ok])/fact
if plot: errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)
return xc, yval, dx, dy
nside = 256
racenter = 0.0 # deg
deccenter = -57.0 # deg
center = equ2gal(racenter, deccenter)
sky = read_map(PATH + 'syn256_pol.fits')
sampling = create_random_pointings([racenter, deccenter], 1000, 10)
all_solutions_fusion = []
all_coverages = []
nbptg = np.linspace(1000,5000,5)
correct_time = 365*86400./(nbptg/1000)
detector_nep = 4.7e-17/np.sqrt(correct_time / len(sampling)*sampling.period)
for i in xrange(len(all_instruments)):
acq_qubic = QubicAcquisition(150, sampling, nside=nside,
detector_nep=detector_nep[i])
all_coverages.append(acq_qubic.get_coverage())
convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)
acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky)
acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)
H = acq_fusion.get_operator()
invntt = acq_fusion.get_invntt_operator()
obs = acq_fusion.get_observation()
A = H.T * invntt * H
b = H.T * invntt * obs
solution_fusion = pcg(A, b, disp=True)
all_solutions_fusion.append(solution_fusion)
mask = all_coverages[0] > np.max(all_coverages[0]/10)
reso=3
Trange=[10, 10, 10]
for i in xrange(len(nbptg)):
figure(i)
resid = all_solutions_fusion[i]['x'] - convolved_sky
resid[~mask,:] = 0
display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)
print(std(resid[mask,0]), std(resid[mask,1]), std(resid[mask,2]))
#savefig(names[i]+'.png')
cols=['black', 'red','blue','green', 'orange']
aa=0.2
rng = [-2,4]
fs=8
nb=20
clf()
for i in xrange(len(all_instruments)):
resid = all_solutions_fusion[i]['x'] - convolved_sky
idata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1],color=cols[i], plot=False)
qdata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1],color=cols[i], plot=False)
udata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1],color=cols[i], plot=False)
subplot(3,1,1)
yscale('log')
xlabel('Normalized coverage')
ylabel('I RMS residuals')
ylim(0.1,2)
plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)
if i==0: plot(idata[0], idata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)
legend(fontsize=fs, loc='upper right')
subplot(3,1,2)
yscale('log')
xlabel('Normalized coverage')
ylabel('Q RMS residuals')
ylim(0.1,2)
plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)
if i==0: plot(qdata[0], qdata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)
legend(fontsize=fs, loc='upper right')
subplot(3,1,3)
yscale('log')
xlabel('Normalized coverage')
ylabel('U RMS residuals')
ylim(0.1,2)
plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)
if i==0: plot(udata[0], udata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)
legend(fontsize=fs, loc='upper right')
#savefig('rms.png')
cols=['black', 'red','blue','green', 'orange']
aa=0.2
rng = [-2,4]
fs=8
nb=20
clf()
for i in xrange(len(all_instruments)):
resid = all_solutions_fusion[i]['x'] - convolved_sky
idata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1],color=cols[i], plot=False)
qdata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1],color=cols[i], plot=False)
udata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1],color=cols[i], plot=False)
if i == 0 :
theidata = idata
theqdata = qdata
theudata = udata
subplot(3,1,1)
xlabel('Normalized coverage')
ylabel('I RMS residuals ratio \n w.r.t. Full Instrument')
ylim(0.,3)
plot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')
plot(idata[0], idata[3]/theidata[3], color=cols[i], label=names[i], lw=2)
legend(fontsize=fs, loc='upper right')
subplot(3,1,2)
xlabel('Normalized coverage')
ylabel('Q RMS residuals ratio \n w.r.t. Full Instrument')
ylim(0.,3)
plot(qdata[0], qdata[3]/theqdata[3], color=cols[i], label=names[i], lw=2)
plot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')
legend(fontsize=fs, loc='upper right')
subplot(3,1,3)
xlabel('Normalized coverage')
ylabel('U RMS residuals ratio \n w.r.t. Full Instrument')
ylim(0.,3)
plot(udata[0], udata[3]/theudata[3], color=cols[i], label=names[i], lw=2)
plot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')
legend(fontsize=fs, loc='upper right')
#savefig('rms_ratio.png')
|
normal
|
{
"blob_id": "bcb028bd25732e17ed1478e122ac3b2d1abf2520",
"index": 7931,
"step-1": "<mask token>\n\n\ndef statstr(vec):\n m = np.mean(vec)\n s = np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m, s)\n\n\ndef plotinst(inst, shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):\n if quad < 4:\n plot(xyc[0], xyc[1], 'ro')\n else:\n plot(xyc[0] + shift, xyc[1], 'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),\n return_projected_map=True)]\n return out\n\n\ndef profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=\n True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini, maxi - dx, nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion:\n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok]) / fact\n if plot:\n errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef statstr(vec):\n m = np.mean(vec)\n s = np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m, s)\n\n\ndef plotinst(inst, shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):\n if quad < 4:\n plot(xyc[0], xyc[1], 'ro')\n else:\n plot(xyc[0] + shift, xyc[1], 'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),\n return_projected_map=True)]\n return out\n\n\ndef profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=\n True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini, maxi - dx, nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion:\n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok]) / fact\n if plot:\n errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\n<mask token>\nfor i in xrange(len(all_instruments)):\n acq_qubic = QubicAcquisition(150, sampling, nside=nside, detector_nep=\n detector_nep[i])\n all_coverages.append(acq_qubic.get_coverage())\n convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)\n acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky\n )\n acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)\n H = acq_fusion.get_operator()\n invntt = acq_fusion.get_invntt_operator()\n obs = acq_fusion.get_observation()\n A = H.T * invntt * H\n b = H.T * invntt * obs\n solution_fusion = pcg(A, b, disp=True)\n all_solutions_fusion.append(solution_fusion)\n<mask token>\nfor i in xrange(len(nbptg)):\n figure(i)\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n resid[~mask, :] = 0\n display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)\n print(std(resid[mask, 0]), std(resid[mask, 1]), std(resid[mask, 2]))\n<mask token>\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n subplot(3, 1, 1)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('I RMS residuals')\n ylim(0.1, 2)\n plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(idata[0], idata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals')\n ylim(0.1, 2)\n plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(qdata[0], qdata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n 
yscale('log')\n xlabel('Normalized coverage')\n ylabel('U RMS residuals')\n ylim(0.1, 2)\n plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(udata[0], udata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n<mask token>\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n if i == 0:\n theidata = idata\n theqdata = qdata\n theudata = udata\n subplot(3, 1, 1)\n xlabel('Normalized coverage')\n ylabel('I RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n plot(idata[0], idata[3] / theidata[3], color=cols[i], label=names[i], lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(qdata[0], qdata[3] / theqdata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n xlabel('Normalized coverage')\n ylabel('U RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(udata[0], udata[3] / theudata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n",
"step-3": "<mask token>\n\n\ndef statstr(vec):\n m = np.mean(vec)\n s = np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m, s)\n\n\ndef plotinst(inst, shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):\n if quad < 4:\n plot(xyc[0], xyc[1], 'ro')\n else:\n plot(xyc[0] + shift, xyc[1], 'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),\n return_projected_map=True)]\n return out\n\n\ndef profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=\n True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini, maxi - dx, nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion:\n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok]) / fact\n if plot:\n errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\nnside = 256\nracenter = 0.0\ndeccenter = -57.0\ncenter = equ2gal(racenter, deccenter)\nsky = read_map(PATH + 'syn256_pol.fits')\nsampling = create_random_pointings([racenter, deccenter], 1000, 10)\nall_solutions_fusion = []\nall_coverages = []\nnbptg = np.linspace(1000, 5000, 5)\ncorrect_time = 365 * 86400.0 / (nbptg / 1000)\ndetector_nep = 4.7e-17 / np.sqrt(correct_time / len(sampling) * sampling.period\n )\nfor i in xrange(len(all_instruments)):\n acq_qubic = QubicAcquisition(150, sampling, nside=nside, detector_nep=\n detector_nep[i])\n all_coverages.append(acq_qubic.get_coverage())\n convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)\n acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky\n )\n acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)\n H = acq_fusion.get_operator()\n invntt = acq_fusion.get_invntt_operator()\n obs = acq_fusion.get_observation()\n A = H.T * invntt * H\n b = H.T * invntt * obs\n solution_fusion = pcg(A, b, disp=True)\n all_solutions_fusion.append(solution_fusion)\nmask = all_coverages[0] > np.max(all_coverages[0] / 10)\nreso = 3\nTrange = [10, 10, 10]\nfor i in xrange(len(nbptg)):\n figure(i)\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n resid[~mask, :] = 0\n display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)\n print(std(resid[mask, 0]), std(resid[mask, 1]), std(resid[mask, 2]))\ncols = ['black', 'red', 'blue', 'green', 'orange']\naa = 0.2\nrng = [-2, 4]\nfs = 8\nnb = 20\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n subplot(3, 1, 1)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('I RMS residuals')\n ylim(0.1, 2)\n 
plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(idata[0], idata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals')\n ylim(0.1, 2)\n plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(qdata[0], qdata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('U RMS residuals')\n ylim(0.1, 2)\n plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(udata[0], udata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\ncols = ['black', 'red', 'blue', 'green', 'orange']\naa = 0.2\nrng = [-2, 4]\nfs = 8\nnb = 20\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n if i == 0:\n theidata = idata\n theqdata = qdata\n theudata = udata\n subplot(3, 1, 1)\n xlabel('Normalized coverage')\n ylabel('I RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n plot(idata[0], idata[3] / theidata[3], color=cols[i], label=names[i], lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(qdata[0], qdata[3] / theqdata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n xlabel('Normalized coverage')\n ylabel('U RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(udata[0], udata[3] / theudata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n",
"step-4": "from __future__ import division\nfrom pyoperators import pcg\nfrom pysimulators import profile\nfrom qubic import create_random_pointings, equ2gal, QubicAcquisition, PlanckAcquisition, QubicPlanckAcquisition, QubicInstrument\nfrom qubic.data import PATH\nfrom qubic.io import read_map\nimport healpy as hp\nimport matplotlib.pyplot as mp\nimport numpy as np\n\n\ndef statstr(vec):\n m = np.mean(vec)\n s = np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m, s)\n\n\ndef plotinst(inst, shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant):\n if quad < 4:\n plot(xyc[0], xyc[1], 'ro')\n else:\n plot(xyc[0] + shift, xyc[1], 'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind, sub=(3, 3, iplot + i),\n return_projected_map=True)]\n return out\n\n\ndef profile(x, y, range=None, nbins=10, fmt=None, plot=True, dispersion=\n True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini, maxi - dx, nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion:\n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok]) / fact\n if plot:\n errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\nnside = 256\nracenter = 0.0\ndeccenter = -57.0\ncenter = equ2gal(racenter, deccenter)\nsky = read_map(PATH + 'syn256_pol.fits')\nsampling = create_random_pointings([racenter, deccenter], 1000, 10)\nall_solutions_fusion = []\nall_coverages = []\nnbptg = np.linspace(1000, 5000, 5)\ncorrect_time = 365 * 86400.0 / (nbptg / 1000)\ndetector_nep = 4.7e-17 / np.sqrt(correct_time / len(sampling) * sampling.period\n )\nfor i in xrange(len(all_instruments)):\n acq_qubic = QubicAcquisition(150, sampling, nside=nside, detector_nep=\n detector_nep[i])\n all_coverages.append(acq_qubic.get_coverage())\n convolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)\n acq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky\n )\n acq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)\n H = acq_fusion.get_operator()\n invntt = acq_fusion.get_invntt_operator()\n obs = acq_fusion.get_observation()\n A = H.T * invntt * H\n b = H.T * invntt * obs\n solution_fusion = pcg(A, b, disp=True)\n all_solutions_fusion.append(solution_fusion)\nmask = all_coverages[0] > np.max(all_coverages[0] / 10)\nreso = 3\nTrange = [10, 10, 10]\nfor i in xrange(len(nbptg)):\n figure(i)\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n resid[~mask, :] = 0\n display(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)\n print(std(resid[mask, 0]), std(resid[mask, 1]), std(resid[mask, 2]))\ncols = ['black', 'red', 'blue', 'green', 'orange']\naa = 0.2\nrng = [-2, 4]\nfs = 8\nnb = 20\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n 
nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n subplot(3, 1, 1)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('I RMS residuals')\n ylim(0.1, 2)\n plot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(idata[0], idata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals')\n ylim(0.1, 2)\n plot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(qdata[0], qdata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n yscale('log')\n xlabel('Normalized coverage')\n ylabel('U RMS residuals')\n ylim(0.1, 2)\n plot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)\n if i == 0:\n plot(udata[0], udata[3] * sqrt(2), '--', color=cols[i], label=names\n [i] + ' x sqrt(2)', lw=2)\n legend(fontsize=fs, loc='upper right')\ncols = ['black', 'red', 'blue', 'green', 'orange']\naa = 0.2\nrng = [-2, 4]\nfs = 8\nnb = 20\nclf()\nfor i in xrange(len(all_instruments)):\n resid = all_solutions_fusion[i]['x'] - convolved_sky\n idata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 0]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n qdata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 1]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n udata = profile(all_coverages[i][mask] / np.max(all_coverages[i]), np.\n nan_to_num(resid[mask, 2]), nbins=nb, range=[0, 1], color=cols[i],\n plot=False)\n if i == 0:\n theidata = idata\n theqdata = qdata\n theudata = udata\n subplot(3, 1, 1)\n xlabel('Normalized coverage')\n ylabel('I RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n plot(idata[0], idata[3] / theidata[3], color=cols[i], label=names[i], lw=2)\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 2)\n xlabel('Normalized coverage')\n ylabel('Q RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(qdata[0], qdata[3] / theqdata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n subplot(3, 1, 3)\n xlabel('Normalized coverage')\n ylabel('U RMS residuals ratio \\n w.r.t. Full Instrument')\n ylim(0.0, 3)\n plot(udata[0], udata[3] / theudata[3], color=cols[i], label=names[i], lw=2)\n plot(linspace(0, 1, 10), np.zeros(10) + sqrt(2), 'k--')\n legend(fontsize=fs, loc='upper right')\n",
"step-5": "from __future__ import division\nfrom pyoperators import pcg\nfrom pysimulators import profile\nfrom qubic import (\n create_random_pointings, equ2gal, QubicAcquisition, PlanckAcquisition,\n QubicPlanckAcquisition, QubicInstrument)\nfrom qubic.data import PATH\nfrom qubic.io import read_map\nimport healpy as hp\nimport matplotlib.pyplot as mp\nimport numpy as np\n\n\n\ndef statstr(vec):\n m=np.mean(vec)\n s=np.std(vec)\n return '{0:.4f} +/- {1:.4f}'.format(m,s)\n\ndef plotinst(inst,shift=0.12):\n for xyc, quad in zip(inst.detector.center, inst.detector.quadrant): \n if quad < 4:\n plot(xyc[0],xyc[1],'ro')\n else:\n plot(xyc[0]+shift,xyc[1],'bo')\n xlim(-0.06, 0.18)\n\n\ndef display(input, msg, iplot=1, reso=5, Trange=[100, 5, 5]):\n out = []\n for i, (kind, lim) in enumerate(zip('IQU', Trange)):\n map = input[..., i]\n out += [hp.gnomview(map, rot=center, reso=reso, xsize=800, min=-lim,\n max=lim, title=msg + ' ' + kind,\n sub=(3, 3, iplot + i), return_projected_map=True)]\n return out\n\n\ndef profile(x,y,range=None,nbins=10,fmt=None,plot=True, dispersion=True, color=None):\n if range == None:\n mini = np.min(x)\n maxi = np.max(x)\n else:\n mini = range[0]\n maxi = range[1]\n dx = (maxi - mini) / nbins\n xmin = np.linspace(mini,maxi-dx,nbins)\n xmax = xmin + dx\n xc = xmin + dx / 2\n yval = np.zeros(nbins)\n dy = np.zeros(nbins)\n dx = np.zeros(nbins) + dx / 2\n for i in np.arange(nbins):\n ok = (x > xmin[i]) & (x < xmax[i])\n yval[i] = np.mean(y[ok])\n if dispersion: \n fact = 1\n else:\n fact = np.sqrt(len(y[ok]))\n dy[i] = np.std(y[ok])/fact\n if plot: errorbar(xc, yval, xerr=dx, yerr=dy, fmt=fmt, color=color)\n return xc, yval, dx, dy\n\n\nnside = 256\nracenter = 0.0 # deg\ndeccenter = -57.0 # deg\ncenter = equ2gal(racenter, deccenter)\n\nsky = read_map(PATH + 'syn256_pol.fits')\nsampling = create_random_pointings([racenter, deccenter], 1000, 10)\n\n\nall_solutions_fusion = []\nall_coverages = []\n\nnbptg = np.linspace(1000,5000,5)\ncorrect_time = 365*86400./(nbptg/1000)\ndetector_nep = 4.7e-17/np.sqrt(correct_time / len(sampling)*sampling.period)\n\nfor i in xrange(len(all_instruments)):\n\tacq_qubic = QubicAcquisition(150, sampling, nside=nside,\n detector_nep=detector_nep[i])\n\tall_coverages.append(acq_qubic.get_coverage())\n\tconvolved_sky = acq_qubic.instrument.get_convolution_peak_operator()(sky)\n\tacq_planck = PlanckAcquisition(150, acq_qubic.scene, true_sky=convolved_sky)\n\tacq_fusion = QubicPlanckAcquisition(acq_qubic, acq_planck)\n\n\tH = acq_fusion.get_operator()\n\tinvntt = acq_fusion.get_invntt_operator()\n\tobs = acq_fusion.get_observation()\n\n\tA = H.T * invntt * H\n\tb = H.T * invntt * obs\n\n\tsolution_fusion = pcg(A, b, disp=True)\n\tall_solutions_fusion.append(solution_fusion)\n\n\n\n\n\nmask = all_coverages[0] > np.max(all_coverages[0]/10)\n\nreso=3\nTrange=[10, 10, 10]\nfor i in xrange(len(nbptg)):\n\tfigure(i)\n\tresid = all_solutions_fusion[i]['x'] - convolved_sky\n\tresid[~mask,:] = 0\n\tdisplay(resid, 'Difference map', iplot=7, reso=reso, Trange=Trange)\n\tprint(std(resid[mask,0]), std(resid[mask,1]), std(resid[mask,2]))\n\t#savefig(names[i]+'.png')\n\n\ncols=['black', 'red','blue','green', 'orange']\naa=0.2\nrng = [-2,4]\nfs=8\nnb=20\nclf()\nfor i in xrange(len(all_instruments)):\n\tresid = all_solutions_fusion[i]['x'] - convolved_sky\n\tidata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tqdata = 
profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tudata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\n\tsubplot(3,1,1)\n\tyscale('log')\n\txlabel('Normalized coverage')\n\tylabel('I RMS residuals')\n\tylim(0.1,2)\n\tplot(idata[0], idata[3], color=cols[i], label=names[i], lw=2)\n\tif i==0: plot(idata[0], idata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)\n\tlegend(fontsize=fs, loc='upper right')\n\n\tsubplot(3,1,2)\n\tyscale('log')\n\txlabel('Normalized coverage')\n\tylabel('Q RMS residuals')\n\tylim(0.1,2)\n\tplot(qdata[0], qdata[3], color=cols[i], label=names[i], lw=2)\n\tif i==0: plot(qdata[0], qdata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)\n\tlegend(fontsize=fs, loc='upper right')\n\n\tsubplot(3,1,3)\n\tyscale('log')\n\txlabel('Normalized coverage')\n\tylabel('U RMS residuals')\n\tylim(0.1,2)\n\tplot(udata[0], udata[3], color=cols[i], label=names[i], lw=2)\n\tif i==0: plot(udata[0], udata[3]*sqrt(2), '--', color=cols[i], label=names[i]+' x sqrt(2)', lw=2)\n\tlegend(fontsize=fs, loc='upper right')\n\n#savefig('rms.png')\n\n\n\n\ncols=['black', 'red','blue','green', 'orange']\naa=0.2\nrng = [-2,4]\nfs=8\nnb=20\nclf()\nfor i in xrange(len(all_instruments)):\n\tresid = all_solutions_fusion[i]['x'] - convolved_sky\n\tidata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,0]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tqdata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,1]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tudata = profile(all_coverages[i][mask]/np.max(all_coverages[i]), np.nan_to_num(resid[mask,2]), nbins=nb, range=[0,1],color=cols[i], plot=False)\n\tif i == 0 :\n\t\ttheidata = idata\n\t\ttheqdata = qdata\n\t\ttheudata = udata\n\n\tsubplot(3,1,1)\n\txlabel('Normalized coverage')\n\tylabel('I RMS residuals ratio \\n w.r.t. Full Instrument')\n\tylim(0.,3)\n\tplot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')\n\tplot(idata[0], idata[3]/theidata[3], color=cols[i], label=names[i], lw=2)\n\tlegend(fontsize=fs, loc='upper right')\n\n\tsubplot(3,1,2)\n\txlabel('Normalized coverage')\n\tylabel('Q RMS residuals ratio \\n w.r.t. Full Instrument')\n\tylim(0.,3)\n\tplot(qdata[0], qdata[3]/theqdata[3], color=cols[i], label=names[i], lw=2)\n\tplot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')\n\tlegend(fontsize=fs, loc='upper right')\n\n\tsubplot(3,1,3)\n\txlabel('Normalized coverage')\n\tylabel('U RMS residuals ratio \\n w.r.t. Full Instrument')\n\tylim(0.,3)\n\tplot(udata[0], udata[3]/theudata[3], color=cols[i], label=names[i], lw=2)\n\tplot(linspace(0,1,10),np.zeros(10)+sqrt(2), 'k--')\n\tlegend(fontsize=fs, loc='upper right')\n\n#savefig('rms_ratio.png')\n\n\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
"""The prediction classes. Instances of the class are returned by
the recommender.
"""
class RelationshipPrediction(object):
"""The prediction of the predicted_relationship appearing between
the given subject-object pair.
@type subject: the domain-specific subject
@ivar subject: the subject
@type object_: the domain-specific object
@ivar object_: the object
@type expectancy: float
    @ivar expectancy: the estimated probability of the predicted_relationship
    occurring between the subject and the object
@type explanation: str
@ivar explanation: the explanation for the prediction
"""
def __init__(self, subject, object_, expectancy, is_uncertain, explanation=''):
"""The initializer"""
self.subject = subject
"""The subject"""
self.object_ = object_
"""The object"""
self.expectancy = expectancy
"""The estimated probability of the predicted_relationship
        occurring between the subject and the object.
"""
self.is_uncertain = is_uncertain
"""Is the prediction made without having any information available?"""
self.explanation = explanation
"""The explanation for the prediction"""
def __unicode__(self):
return u"%s <- %s: %f, %s" % (
self.subject,
self.object_,
self.expectancy,
self.explanation
)
def __repr__(self):
return "< %s >" % str(self.__unicode__())
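# Illustrative usage (hypothetical subject/object values, not part of the original module):
#   pred = RelationshipPrediction(subject='user_1', object_='item_42',
#                                 expectancy=0.42, is_uncertain=False,
#                                 explanation='co-occurred in 3 sessions')
#   repr(pred)  # yields "< user_1 <- item_42: 0.420000, co-occurred in 3 sessions >"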
|
normal
|
{
"blob_id": "c3de9e6129bcafd863cd330ac281345fb563cc8c",
"index": 6259,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass RelationshipPrediction(object):\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.\n expectancy, self.explanation)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass RelationshipPrediction(object):\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.\n expectancy, self.explanation)\n\n def __repr__(self):\n return '< %s >' % str(self.__unicode__())\n",
"step-4": "<mask token>\n\n\nclass RelationshipPrediction(object):\n <mask token>\n\n def __init__(self, subject, object_, expectancy, is_uncertain,\n explanation=''):\n \"\"\"The initializer\"\"\"\n self.subject = subject\n \"\"\"The subject\"\"\"\n self.object_ = object_\n \"\"\"The object\"\"\"\n self.expectancy = expectancy\n \"\"\"The estimated probability of the predicted_relationship\n occuring between the subject and the object.\n \"\"\"\n self.is_uncertain = is_uncertain\n \"\"\"Is the prediction made without having any information available?\"\"\"\n self.explanation = explanation\n \"\"\"The explanation for the prediction\"\"\"\n\n def __unicode__(self):\n return u'%s <- %s: %f, %s' % (self.subject, self.object_, self.\n expectancy, self.explanation)\n\n def __repr__(self):\n return '< %s >' % str(self.__unicode__())\n",
"step-5": "\"\"\"The prediction classes. Instances of the class are returned by \nthe recommender.\n\"\"\"\n\nclass RelationshipPrediction(object):\n \"\"\"The prediction of the predicted_relationship appearing between\n the given subject-object pair.\n \n @type subject: the domain-specific subject\n @ivar subject: the subject \n \n @type object_: the domain-specific object\n @ivar object_: the object\n \n @type expectancy: float\n @ivar expectancy: the estimated probability of the predict_relationship\n occuring between the subject and the object\n \n @type explanation: str\n @ivar explanation: the explanation for the prediction \n \"\"\"\n \n def __init__(self, subject, object_, expectancy, is_uncertain, explanation=''):\n \"\"\"The initializer\"\"\"\n \n self.subject = subject\n \"\"\"The subject\"\"\"\n \n self.object_ = object_\n \"\"\"The object\"\"\"\n \n self.expectancy = expectancy\n \"\"\"The estimated probability of the predicted_relationship\n occuring between the subject and the object.\n \"\"\"\n \n self.is_uncertain = is_uncertain\n \"\"\"Is the prediction made without having any information available?\"\"\"\n \n self.explanation = explanation\n \"\"\"The explanation for the prediction\"\"\"\n\n def __unicode__(self):\n return u\"%s <- %s: %f, %s\" % (\n self.subject, \n self.object_, \n self.expectancy, \n self.explanation\n )\n \n def __repr__(self):\n return \"< %s >\" % str(self.__unicode__())\n",
"step-ids": [
0,
2,
3,
4,
6
]
}
|
[
0,
2,
3,
4,
6
] |
import os
import json
def load_json_if_exists(path):
if not os.path.isfile(path):
return {}
with open(path) as f:
return json.load(f)
def json_dump(obj, file_path):
with open(file_path, 'w') as f:
json.dump(obj, f)
def get_folder_paths(directory):
return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f))]
def file_to_lines(file_path):
if len(file_path) == 0:
return []
with open(file_path) as f:
lines = list(f.read().splitlines())
return lines
def get_repo_path(file_path):
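    # Walk upward from the file's directory until a folder containing .git is found (at most 100 levels).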
if os.path.isfile(file_path):
folder_path = os.path.abspath(os.path.join(file_path, os.pardir))
else:
folder_path = file_path
for i in range(100):
if folder_path == '/':
return None
if is_repo_path(folder_path):
break
folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))
return folder_path
def is_repo_path(path):
return os.path.isdir(path) and '.git' in os.listdir(path)
class LineNumberTracker:
'''
    When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones.
'''
def __init__(self):
self._log = []
def transform(self, line_num):
for is_add, start, end in self._log:
if line_num < start:
pass
elif line_num < end and not is_add:
assert False, 'Line Deleted: {} {}'.format(line_num, self._log)
else:
if is_add:
line_num += (end - start)
else:
line_num -= (end - start)
return line_num
def remove_lines(self, start, end):
self._log.append((False, start, end))
def add_lines(self, start, end):
self._log.append((True, start, end))
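# Illustrative usage: after deleting original lines 10-11, an original line 20
# maps to line 18 in the transformed file.
#   tracker = LineNumberTracker()
#   tracker.remove_lines(10, 12)
#   tracker.transform(20)  # -> 18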
|
normal
|
{
"blob_id": "3788888a17e2598e781803f89cd63ac9c3219f59",
"index": 4341,
"step-1": "<mask token>\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\n<mask token>\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\n<mask token>\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-2": "<mask token>\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\n<mask token>\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-3": "<mask token>\n\n\ndef load_json_if_exists(path):\n if not os.path.isfile(path):\n return {}\n with open(path) as f:\n return json.load(f)\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\ndef get_folder_paths(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os\n .path.isdir(os.path.join(directory, f))]\n\n\ndef file_to_lines(file_path):\n if len(file_path) == 0:\n return []\n with open(file_path) as f:\n lines = list(f.read().splitlines())\n return lines\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-4": "import os\nimport json\n\n\ndef load_json_if_exists(path):\n if not os.path.isfile(path):\n return {}\n with open(path) as f:\n return json.load(f)\n\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\n\ndef get_folder_paths(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os\n .path.isdir(os.path.join(directory, f))]\n\n\ndef file_to_lines(file_path):\n if len(file_path) == 0:\n return []\n with open(file_path) as f:\n lines = list(f.read().splitlines())\n return lines\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\n\nclass LineNumberTracker:\n \"\"\"\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n \"\"\"\n\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n elif is_add:\n line_num += end - start\n else:\n line_num -= end - start\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n",
"step-5": "import os\nimport json\n\n\ndef load_json_if_exists(path):\n if not os.path.isfile(path):\n return {}\n with open(path) as f:\n return json.load(f)\n\ndef json_dump(obj, file_path):\n with open(file_path, 'w') as f:\n json.dump(obj, f)\n\ndef get_folder_paths(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isdir(os.path.join(directory, f))]\n\n\ndef file_to_lines(file_path):\n if len(file_path) == 0:\n return []\n with open(file_path) as f:\n lines = list(f.read().splitlines())\n return lines\n\n\ndef get_repo_path(file_path):\n if os.path.isfile(file_path):\n folder_path = os.path.abspath(os.path.join(file_path, os.pardir))\n else:\n folder_path = file_path\n for i in range(100):\n if folder_path == '/':\n return None\n if is_repo_path(folder_path):\n break\n folder_path = os.path.abspath(os.path.join(folder_path, os.pardir))\n return folder_path\n\ndef is_repo_path(path):\n return os.path.isdir(path) and '.git' in os.listdir(path)\n\nclass LineNumberTracker:\n '''\n When deleting/adding lines in a file, this allows you to translate original line numbers into transformed ones,\n '''\n def __init__(self):\n self._log = []\n\n def transform(self, line_num):\n for is_add, start, end in self._log:\n if line_num < start:\n pass\n elif line_num < end and not is_add:\n assert False, 'Line Deleted: {} {}'.format(line_num, self._log)\n else:\n if is_add:\n line_num += (end - start)\n else:\n line_num -= (end - start)\n return line_num\n\n def remove_lines(self, start, end):\n self._log.append((False, start, end))\n\n def add_lines(self, start, end):\n self._log.append((True, start, end))\n\n\n",
"step-ids": [
8,
9,
12,
13,
14
]
}
|
[
8,
9,
12,
13,
14
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
from collections import defaultdict
from docopt import docopt
__doc__ = """{f}
Usage:
{f} <used_file>
{f} -h | --help
Options:
-h --help Show this screen and exit.
""".format(f=__file__)
args = docopt(__doc__)
used_file = args['<used_file>']
exceed_list = []
user_limit_dict = defaultdict(float)
user_limit_f = open('/opt/uge/Accounting_Statistics/etc/user_limit_py.csv', 'r')
reader = csv.reader(user_limit_f)
header = next(reader)
for row in reader:
user_limit_dict[row[0]] = float(row[1])
print user_limit_dict
used_f = open(used_file, 'r')
reader = csv.DictReader(used_f)
for row in reader:
print row
|
normal
|
{
"blob_id": "40b6d62f1e360c0df19b7e98fcb67dbd578e709f",
"index": 736,
"step-1": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport csv\nfrom collections import defaultdict\nfrom docopt import docopt\n\n__doc__ = \"\"\"{f}\n\nUsage:\n {f} <used_file>\n {f} -h | --help\n\nOptions:\n -h --help Show this screen and exit.\n\"\"\".format(f=__file__)\n\nargs = docopt(__doc__)\nused_file = args['<used_file>']\n\nexceed_list = []\n\nuser_limit_dict = defaultdict(float)\n\nuser_limit_f = open('/opt/uge/Accounting_Statistics/etc/user_limit_py.csv', 'r')\n\nreader = csv.reader(user_limit_f)\nheader = next(reader)\nfor row in reader:\n user_limit_dict[row[0]] = float(row[1])\n\nprint user_limit_dict\n\n\nused_f = open(used_file, 'r')\n\nreader = csv.DictReader(used_f)\nfor row in reader:\n print row\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from whylogs.core.annotation_profiling import Rectangle
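# Sanity checks for the whylogs Rectangle helper: area, pairwise intersection, and IoU.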
def test_rect():
rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{"name": "test"}])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 100
assert rect.intersection(test) == 25
assert rect.iou(test) == 25 / 100.0
def test_rect_empty():
rect = Rectangle([[0, 0], [0, 0]])
test = Rectangle([[0, 0], [5, 5]])
assert rect.area == 0
assert rect.intersection(test) == 0
assert rect.iou(test) == 0
|
normal
|
{
"blob_id": "b65d25198d55ab4a859b9718b7b225fa92c13a2b",
"index": 1202,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':\n 'test'}])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 100\n assert rect.intersection(test) == 25\n assert rect.iou(test) == 25 / 100.0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':\n 'test'}])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 100\n assert rect.intersection(test) == 25\n assert rect.iou(test) == 25 / 100.0\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [0, 0]])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 0\n assert rect.intersection(test) == 0\n assert rect.iou(test) == 0\n",
"step-4": "from whylogs.core.annotation_profiling import Rectangle\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{'name':\n 'test'}])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 100\n assert rect.intersection(test) == 25\n assert rect.iou(test) == 25 / 100.0\n\n\ndef test_rect():\n rect = Rectangle([[0, 0], [0, 0]])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 0\n assert rect.intersection(test) == 0\n assert rect.iou(test) == 0\n",
"step-5": "from whylogs.core.annotation_profiling import Rectangle\n\n\ndef test_rect():\n\n rect = Rectangle([[0, 0], [10, 10]], confidence=0.8, labels=[{\"name\": \"test\"}])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 100\n assert rect.intersection(test) == 25\n assert rect.iou(test) == 25 / 100.0\n\n\ndef test_rect():\n\n rect = Rectangle([[0, 0], [0, 0]])\n test = Rectangle([[0, 0], [5, 5]])\n assert rect.area == 0\n assert rect.intersection(test) == 0\n assert rect.iou(test) == 0\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import hashlib
from django.conf import settings
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils.crypto import get_random_string
def hexdigest_sha256(*args):
r = hashlib.sha256()
for arg in args:
r.update(str(arg).encode('utf-8'))
return r.hexdigest()
def get_reply_addr(message_id, dest):
if not hasattr(settings, 'REPLY_EMAIL'):
return []
addr = settings.REPLY_EMAIL
pos = addr.find('@')
name = addr[:pos]
domain = addr[pos:]
key = hexdigest_sha256(settings.SECRET_KEY, message_id, dest.pk)[0:12]
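    # Resulting address form: name+{email_token}{message_id}{key}@domain, so an
    # incoming reply can be matched back to the original message and recipient.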
return ['%s+%s%s%s%s' % (name, dest.profile.email_token, message_id, key, domain)]
def generate_message_token():
return get_random_string(length=60, allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789')
def notify_by_email(template, data, subject, sender, dests, message_id, ref=None):
if hasattr(settings, 'REPLY_EMAIL') and hasattr(settings, 'REPLY_KEY'):
data.update({'answering': True})
text_message = render_to_string('conversations/emails/%s.txt' % template, data)
html_message = render_to_string('conversations/emails/%s.html' % template, data)
from_email = '{name} <{email}>'.format(
name=sender.get_full_name() or sender.username,
email=settings.DEFAULT_FROM_EMAIL)
# Generating headers
headers = {'Message-ID': "<%s.%s>" % (message_id, settings.DEFAULT_FROM_EMAIL)}
if ref:
# This email reference a previous one
headers.update({
'References': '<%s.%s>' % (ref, settings.DEFAULT_FROM_EMAIL),
})
mails = []
for dest in dests:
if not dest.email:
continue
reply_to = get_reply_addr(message_id, dest)
mails += [(subject, (text_message, html_message), from_email, [dest.email], reply_to, headers)]
messages = []
for subject, message, from_email, dest_emails, reply_to, headers in mails:
text_message, html_message = message
msg = EmailMultiAlternatives(subject, text_message, from_email, dest_emails, reply_to=reply_to,
headers=headers)
msg.attach_alternative(html_message, 'text/html')
messages += [msg]
with mail.get_connection() as connection:
connection.send_messages(messages)
|
normal
|
{
"blob_id": "a35004e2b306ba1a8649ce66a1612f63a2b6bf39",
"index": 2673,
"step-1": "<mask token>\n\n\ndef hexdigest_sha256(*args):\n r = hashlib.sha256()\n for arg in args:\n r.update(str(arg).encode('utf-8'))\n return r.hexdigest()\n\n\n<mask token>\n\n\ndef notify_by_email(template, data, subject, sender, dests, message_id, ref\n =None):\n if hasattr(settings, 'REPLY_EMAIL') and hasattr(settings, 'REPLY_KEY'):\n data.update({'answering': True})\n text_message = render_to_string('conversations/emails/%s.txt' %\n template, data)\n html_message = render_to_string('conversations/emails/%s.html' %\n template, data)\n from_email = '{name} <{email}>'.format(name=sender.get_full_name() or\n sender.username, email=settings.DEFAULT_FROM_EMAIL)\n headers = {'Message-ID': '<%s.%s>' % (message_id, settings.\n DEFAULT_FROM_EMAIL)}\n if ref:\n headers.update({'References': '<%s.%s>' % (ref, settings.\n DEFAULT_FROM_EMAIL)})\n mails = []\n for dest in dests:\n if not dest.email:\n continue\n reply_to = get_reply_addr(message_id, dest)\n mails += [(subject, (text_message, html_message), from_email, [dest\n .email], reply_to, headers)]\n messages = []\n for subject, message, from_email, dest_emails, reply_to, headers in mails:\n text_message, html_message = message\n msg = EmailMultiAlternatives(subject, text_message, from_email,\n dest_emails, reply_to=reply_to, headers=headers)\n msg.attach_alternative(html_message, 'text/html')\n messages += [msg]\n with mail.get_connection() as connection:\n connection.send_messages(messages)\n",
"step-2": "<mask token>\n\n\ndef hexdigest_sha256(*args):\n r = hashlib.sha256()\n for arg in args:\n r.update(str(arg).encode('utf-8'))\n return r.hexdigest()\n\n\n<mask token>\n\n\ndef generate_message_token():\n return get_random_string(length=60, allowed_chars=\n 'abcdefghijklmnopqrstuvwxyz0123456789')\n\n\ndef notify_by_email(template, data, subject, sender, dests, message_id, ref\n =None):\n if hasattr(settings, 'REPLY_EMAIL') and hasattr(settings, 'REPLY_KEY'):\n data.update({'answering': True})\n text_message = render_to_string('conversations/emails/%s.txt' %\n template, data)\n html_message = render_to_string('conversations/emails/%s.html' %\n template, data)\n from_email = '{name} <{email}>'.format(name=sender.get_full_name() or\n sender.username, email=settings.DEFAULT_FROM_EMAIL)\n headers = {'Message-ID': '<%s.%s>' % (message_id, settings.\n DEFAULT_FROM_EMAIL)}\n if ref:\n headers.update({'References': '<%s.%s>' % (ref, settings.\n DEFAULT_FROM_EMAIL)})\n mails = []\n for dest in dests:\n if not dest.email:\n continue\n reply_to = get_reply_addr(message_id, dest)\n mails += [(subject, (text_message, html_message), from_email, [dest\n .email], reply_to, headers)]\n messages = []\n for subject, message, from_email, dest_emails, reply_to, headers in mails:\n text_message, html_message = message\n msg = EmailMultiAlternatives(subject, text_message, from_email,\n dest_emails, reply_to=reply_to, headers=headers)\n msg.attach_alternative(html_message, 'text/html')\n messages += [msg]\n with mail.get_connection() as connection:\n connection.send_messages(messages)\n",
"step-3": "<mask token>\n\n\ndef hexdigest_sha256(*args):\n r = hashlib.sha256()\n for arg in args:\n r.update(str(arg).encode('utf-8'))\n return r.hexdigest()\n\n\ndef get_reply_addr(message_id, dest):\n if not hasattr(settings, 'REPLY_EMAIL'):\n return []\n addr = settings.REPLY_EMAIL\n pos = addr.find('@')\n name = addr[:pos]\n domain = addr[pos:]\n key = hexdigest_sha256(settings.SECRET_KEY, message_id, dest.pk)[0:12]\n return ['%s+%s%s%s%s' % (name, dest.profile.email_token, message_id,\n key, domain)]\n\n\ndef generate_message_token():\n return get_random_string(length=60, allowed_chars=\n 'abcdefghijklmnopqrstuvwxyz0123456789')\n\n\ndef notify_by_email(template, data, subject, sender, dests, message_id, ref\n =None):\n if hasattr(settings, 'REPLY_EMAIL') and hasattr(settings, 'REPLY_KEY'):\n data.update({'answering': True})\n text_message = render_to_string('conversations/emails/%s.txt' %\n template, data)\n html_message = render_to_string('conversations/emails/%s.html' %\n template, data)\n from_email = '{name} <{email}>'.format(name=sender.get_full_name() or\n sender.username, email=settings.DEFAULT_FROM_EMAIL)\n headers = {'Message-ID': '<%s.%s>' % (message_id, settings.\n DEFAULT_FROM_EMAIL)}\n if ref:\n headers.update({'References': '<%s.%s>' % (ref, settings.\n DEFAULT_FROM_EMAIL)})\n mails = []\n for dest in dests:\n if not dest.email:\n continue\n reply_to = get_reply_addr(message_id, dest)\n mails += [(subject, (text_message, html_message), from_email, [dest\n .email], reply_to, headers)]\n messages = []\n for subject, message, from_email, dest_emails, reply_to, headers in mails:\n text_message, html_message = message\n msg = EmailMultiAlternatives(subject, text_message, from_email,\n dest_emails, reply_to=reply_to, headers=headers)\n msg.attach_alternative(html_message, 'text/html')\n messages += [msg]\n with mail.get_connection() as connection:\n connection.send_messages(messages)\n",
"step-4": "import hashlib\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils.crypto import get_random_string\n\n\ndef hexdigest_sha256(*args):\n r = hashlib.sha256()\n for arg in args:\n r.update(str(arg).encode('utf-8'))\n return r.hexdigest()\n\n\ndef get_reply_addr(message_id, dest):\n if not hasattr(settings, 'REPLY_EMAIL'):\n return []\n addr = settings.REPLY_EMAIL\n pos = addr.find('@')\n name = addr[:pos]\n domain = addr[pos:]\n key = hexdigest_sha256(settings.SECRET_KEY, message_id, dest.pk)[0:12]\n return ['%s+%s%s%s%s' % (name, dest.profile.email_token, message_id,\n key, domain)]\n\n\ndef generate_message_token():\n return get_random_string(length=60, allowed_chars=\n 'abcdefghijklmnopqrstuvwxyz0123456789')\n\n\ndef notify_by_email(template, data, subject, sender, dests, message_id, ref\n =None):\n if hasattr(settings, 'REPLY_EMAIL') and hasattr(settings, 'REPLY_KEY'):\n data.update({'answering': True})\n text_message = render_to_string('conversations/emails/%s.txt' %\n template, data)\n html_message = render_to_string('conversations/emails/%s.html' %\n template, data)\n from_email = '{name} <{email}>'.format(name=sender.get_full_name() or\n sender.username, email=settings.DEFAULT_FROM_EMAIL)\n headers = {'Message-ID': '<%s.%s>' % (message_id, settings.\n DEFAULT_FROM_EMAIL)}\n if ref:\n headers.update({'References': '<%s.%s>' % (ref, settings.\n DEFAULT_FROM_EMAIL)})\n mails = []\n for dest in dests:\n if not dest.email:\n continue\n reply_to = get_reply_addr(message_id, dest)\n mails += [(subject, (text_message, html_message), from_email, [dest\n .email], reply_to, headers)]\n messages = []\n for subject, message, from_email, dest_emails, reply_to, headers in mails:\n text_message, html_message = message\n msg = EmailMultiAlternatives(subject, text_message, from_email,\n dest_emails, reply_to=reply_to, headers=headers)\n msg.attach_alternative(html_message, 'text/html')\n messages += [msg]\n with mail.get_connection() as connection:\n connection.send_messages(messages)\n",
"step-5": "import hashlib\n\nfrom django.conf import settings\nfrom django.core import mail\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom django.utils.crypto import get_random_string\n\n\ndef hexdigest_sha256(*args):\n\n r = hashlib.sha256()\n for arg in args:\n r.update(str(arg).encode('utf-8'))\n\n return r.hexdigest()\n\n\ndef get_reply_addr(message_id, dest):\n\n if not hasattr(settings, 'REPLY_EMAIL'):\n return []\n\n addr = settings.REPLY_EMAIL\n pos = addr.find('@')\n name = addr[:pos]\n domain = addr[pos:]\n key = hexdigest_sha256(settings.SECRET_KEY, message_id, dest.pk)[0:12]\n\n return ['%s+%s%s%s%s' % (name, dest.profile.email_token, message_id, key, domain)]\n\n\ndef generate_message_token():\n return get_random_string(length=60, allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789')\n\n\ndef notify_by_email(template, data, subject, sender, dests, message_id, ref=None):\n\n if hasattr(settings, 'REPLY_EMAIL') and hasattr(settings, 'REPLY_KEY'):\n data.update({'answering': True})\n\n text_message = render_to_string('conversations/emails/%s.txt' % template, data)\n html_message = render_to_string('conversations/emails/%s.html' % template, data)\n\n from_email = '{name} <{email}>'.format(\n name=sender.get_full_name() or sender.username,\n email=settings.DEFAULT_FROM_EMAIL)\n\n # Generating headers\n headers = {'Message-ID': \"<%s.%s>\" % (message_id, settings.DEFAULT_FROM_EMAIL)}\n if ref:\n # This email reference a previous one\n headers.update({\n 'References': '<%s.%s>' % (ref, settings.DEFAULT_FROM_EMAIL),\n })\n\n mails = []\n for dest in dests:\n if not dest.email:\n continue\n\n reply_to = get_reply_addr(message_id, dest)\n\n mails += [(subject, (text_message, html_message), from_email, [dest.email], reply_to, headers)]\n\n messages = []\n for subject, message, from_email, dest_emails, reply_to, headers in mails:\n text_message, html_message = message\n msg = EmailMultiAlternatives(subject, text_message, from_email, dest_emails, reply_to=reply_to,\n headers=headers)\n msg.attach_alternative(html_message, 'text/html')\n messages += [msg]\n with mail.get_connection() as connection:\n connection.send_messages(messages)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','mkrandom.settings')
import django
django.setup()
from main.models import Character, Vehicle, Tire, Glider
char_names = [
'Mario',
'Luigi',
'Peach',
'Daisy',
'Rosalina',
'Mario Tanooki',
'Peach cat',
'Yoshi',
'Yoshi (LBlue)',
'Yoshi (Black)',
'Yoshi (Rose)',
'Yoshi (Yellow)',
'Yoshi (White)',
'Yoshi (Blue)',
'Yoshi (Rose)',
'Yoshi (Orange)',
'Toad',
'Koopa',
'Shyguy',
'Shyguy (LB)',
'Shyguy (Black)',
'Shyguy (Rose)',
'Shyguy (Yellow)',
'Shyguy (White)',
'Shyguy (Blue)',
'Shyguy (Rose)',
'Shyguy (Orange)',
'Lakitu',
'Toadette',
'Boo',
'Baby Mario',
'Baby Luigi',
'Baby Peach',
'Baby Daisy',
'Baby Rosalina',
'Metal Mario',
'Golden Mario',
'Golden Peach',
'Wario',
'Waluigi',
'Donkey Kong',
'Bowser',
'Skelerex',
'Bowser Jr',
'Dry Bowser',
'Lemmy',
'Larry',
'Wendy',
'Ludwig',
'Iggy',
'Roy',
'Morton',
'Inkling (G)',
'Inkling (B)',
'Link (SSBU)',
'Link (BOTW)',
'Villager (B)',
'Villager(G)',
'Mary',
]
char_urls = [
'https://static.wikia.nocookie.net/heros/images/9/94/Mario_and_Sonic_Tokyo_2020_Mario_artwork.png/revision/latest?cb=20210410003745&path-prefix=fr',
'https://freepngimg.com/thumb/categories/462.png',
'https://static.wikia.nocookie.net/smashbros/images/0/06/Peach_SMP.png/revision/latest?cb=20190420130956&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/6/6c/Artwork_Daisy_MP10.png/revision/latest?cb=20171021130941&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/1/17/Harmonie_The_Top_100.png/revision/latest?cb=20171021123917&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/3/33/Mario_tanuki_-_SM3DL.png/revision/latest/scale-to-width-down/250?cb=20190409114830&path-prefix=fr',
'https://i.pinimg.com/originals/7d/5d/d8/7d5dd803a6eaad9e7491ed59f184eb39.png',
'https://www.seekpng.com/png/full/15-156558_ground-pound-yoshi-super-mario-yoshi-png.png',
'https://static.wikia.nocookie.net/hello-yoshi/images/f/fb/ACL_MK8_Light_Blue_Yoshi.png/revision/latest?cb=20180325192809',
'https://www.123-stickers.com/5731-6069-large/Array.jpg',
'https://static.wikia.nocookie.net/supermariorun/images/3/32/Yoshi_rouge.PNG/revision/latest?cb=20190427132857&path-prefix=fr',
'https://static.wikia.nocookie.net/supermariorun/images/9/94/Yoshi_jaune.PNG/revision/latest?cb=20190427132253&path-prefix=fr',
'https://static.wikia.nocookie.net/yoshi/images/b/b9/Yoshi_blanc.png/revision/latest?cb=20181128092526&path-prefix=fr',
'https://mario.wiki.gallery/images/thumb/9/9a/MKT_Artwork_BlueYoshi.png/129px-MKT_Artwork_BlueYoshi.png',
'https://e7.pngegg.com/pngimages/860/699/png-clipart-mario-yoshi-yoshi-s-story-super-mario-world-2-yoshi-s-island-yoshi-s-woolly-world-yoshi-s-new-island-yoshi-nintendo-computer-wallpaper.png',
'https://static.wikia.nocookie.net/yoshi/images/a/a4/Orange-yoshi-yoshi-29007923-415-479.png/revision/latest?cb=20201026191941&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/e/e4/SMRToad.png/revision/latest?cb=20161123170829&path-prefix=fr',
'https://static.wikia.nocookie.net/smashbros/images/e/ed/Art_Koopa_NSMB.png/revision/latest?cb=20131223214127&path-prefix=fr',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/d585815f-9fc0-440f-9949-a4a9c06bb713/db7whvu-94fc7f0d-1dea-47aa-922d-428a26ed8480.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcL2Q1ODU4MTVmLTlmYzAtNDQwZi05OTQ5LWE0YTljMDZiYjcxM1wvZGI3d2h2dS05NGZjN2YwZC0xZGVhLTQ3YWEtOTIyZC00MjhhMjZlZDg0ODAucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.iNMsbFuXa43xVer7q_c2UB65P2wAVONONt-wrMHozjo',
'https://i.pinimg.com/originals/58/69/c3/5869c3396ea69ca97c76f0b725099aa9.png',
'https://static.wikia.nocookie.net/supermarioexploration/images/8/8e/18B83E32-0819-4994-A3F8-E90CC35AB8AC.png/revision/latest/scale-to-width-down/872?cb=20180607214102',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dw0-1d608b14-5aba-43f7-b4a8-e855207824c1.png/v1/fill/w_600,h_815,strp/super_mario__green_shy_guy_2d_by_joshuat1306_dcz4dw0-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHcwLTFkNjA4YjE0LTVhYmEtNDNmNy1iNGE4LWU4NTUyMDc4MjRjMS5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.RxuED4zTRqJT-3TAQ8iHGS6zpoDw4O4DIKFQ8cKWpSM',
'https://static.miraheze.org/drmarioworldwiki/thumb/9/9a/Cha_sub_shyguyYellow.png/144px-Cha_sub_shyguyYellow.png',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz564x-7c505016-32d8-4268-b44e-358edcb1b10d.png/v1/fill/w_600,h_815,strp/super_mario__white_shy_guy_2d_by_joshuat1306_dcz564x-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o1NjR4LTdjNTA1MDE2LTMyZDgtNDI2OC1iNDRlLTM1OGVkY2IxYjEwZC5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.gLfujNRPJ5nNiOq-siQUD6ifo28x0oQHEB4PrpNHqFk',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dqq-95483c93-ee74-4ca0-a820-3287359457a3.png/v1/fill/w_600,h_815,strp/super_mario__blue_shy_guy_2d_by_joshuat1306_dcz4dqq-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHFxLTk1NDgzYzkzLWVlNzQtNGNhMC1hODIwLTMyODczNTk0NTdhMy5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.w1w6wZOiQ0oxfwNTiiuFy2Ph6yO6mN99-U_HYKZdZyQ',
'https://static.wikia.nocookie.net/paper-shin-aka-keroro-gunsou/images/f/f0/Pink_Shy_Guy_dance.png/revision/latest/scale-to-width-down/250?cb=20210525165708',
'https://static.wikia.nocookie.net/fantendo/images/f/ff/ShyGuyn_s._Png/revision/latest/scale-to-width-down/250?cb=20121222235649',
'https://static.wikia.nocookie.net/fantendo/images/e/eb/Cloudless_Lakitu.png/revision/latest/scale-to-width-down/250?cb=20120809192910',
'https://static.wikia.nocookie.net/mario/images/b/b2/ToadetteMP10.png/revision/latest?cb=20190609122040&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/a/a1/Boo_CTTT.png/revision/latest?cb=20210504081014',
'https://static.wikia.nocookie.net/videogames-fanon/images/d/d9/BabySit.png/revision/latest?cb=20120930205222',
'https://i.pinimg.com/originals/c8/4d/1f/c84d1f11741ee80b7bbda79a449917ab.png',
'https://www.pngkit.com/png/full/436-4365611_download-zip-archive-baby-peach-mario-bros.png',
'https://static.wikia.nocookie.net/fantendo/images/b/be/Baby_Daisy.png/revision/latest?cb=20210119015117',
'https://mario.wiki.gallery/images/3/33/MKT_Artwork_BabyRosalina.png',
'https://static.wikia.nocookie.net/mario/images/7/7e/Metal_Mario_Artwork_2_-_Mario_Kart_7.png/revision/latest?cb=20120513171323',
'https://static.wikia.nocookie.net/mario/images/1/10/MGWT_Gold_Mario.png/revision/latest?cb=20190317040405',
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/0e738c17-7f3c-422e-8225-f8c782b08626/deg7wos-27ff3182-82ba-43ab-b5c0-f05cbec329f2.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcLzBlNzM4YzE3LTdmM2MtNDIyZS04MjI1LWY4Yzc4MmIwODYyNlwvZGVnN3dvcy0yN2ZmMzE4Mi04MmJhLTQzYWItYjVjMC1mMDVjYmVjMzI5ZjIucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.bK3J5_NJrKn-JHsqIxEUCjBiXqM4dMnBho-b2lJ6sK8',
'https://www.smashbros.com/assets_v2/img/fighter/wario/main2.png',
'https://static.wikia.nocookie.net/wario/images/8/8a/Waluigi%28SMP%290.png/revision/latest?cb=20180929091141',
'https://static.wikia.nocookie.net/heroes-fr/images/5/5c/Donkey_Kong.png/revision/latest?cb=20201122110342&path-prefix=fr',
'https://static.wikia.nocookie.net/epicpixelbattles/images/0/0b/Bowser-png-clipart-removebg-preview.png/revision/latest?cb=20201013093525',
'https://static.wikia.nocookie.net/mario/images/1/12/MPSRSkelerex.png/revision/latest/scale-to-width-down/2000?cb=20161015183419&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/0/07/Art_Bowser_Jr_SPM.png/revision/latest?cb=20181112222531&path-prefix=fr',
'https://mario.wiki.gallery/images/thumb/9/9d/Dry_Bowser_Artwork.png/250px-Dry_Bowser_Artwork.png',
'https://www.pngkey.com/png/full/563-5634904_super-mario-odyssey-lemmy-mario-kart-8-deluxe.png',
'https://static.wikia.nocookie.net/mariokart/images/4/42/LarryKoopa.png/revision/latest?cb=20140313170129',
'https://mario.wiki.gallery/images/thumb/9/95/NSMBW_Wendy_Artwork.png/1200px-NSMBW_Wendy_Artwork.png',
'https://static.wikia.nocookie.net/mario-fr/images/f/f6/1-1571859148.png/revision/latest?cb=20191023193229&path-prefix=fr',
'https://static.wikia.nocookie.net/mario/images/4/4c/Iggy_NSMBU.png/revision/latest?cb=20171208215237&path-prefix=fr',
'https://static.wikia.nocookie.net/mario-fr/images/f/fb/2.png/revision/latest?cb=20191023191713&path-prefix=fr',
'https://static.wikia.nocookie.net/fantendo/images/4/4f/Morton_Koopa_Jr_3D.png/revision/latest?cb=20110403192112',
'https://static.wikia.nocookie.net/mario/images/2/2e/Inkling_SSBU.png/revision/latest?cb=20200216081405',
'https://i.pinimg.com/originals/7c/ce/f8/7ccef872fcee2e11945c6799ce2985cc.png',
'https://www.seekpng.com/png/full/7-73001_link-zelda-png-super-smash-bros-for-wii.png',
'https://static.wikia.nocookie.net/versus-compendium/images/0/00/Link_BotW.png/revision/latest?cb=20181128185543',
'https://static.wikia.nocookie.net/nintendo/images/1/1d/Villager-Boy-1.png/revision/latest?cb=20150419125930&path-prefix=en',
'https://i.pinimg.com/originals/bb/ca/f7/bbcaf749d9dc2d1b1259e8fe5cb49769.png',
'https://static.wikia.nocookie.net/nintendo-univers/images/a/a9/Marie_ACAF_3.png/revision/latest?cb=20161221163100&path-prefix=fr',
]
car_names = [
'Standard Kart',
'Pipe Frame',
'Mach 8',
'Steel Driver',
'Cat Cruiser',
'Circuit Special',
'Tri-Speeder',
'Badwagon',
'Prancer',
'Biddybuggy',
'Landship',
'Sneeker',
'Sports Coupe',
'Gold Standard',
'GLA',
'W 25 Silver Arrow',
'300 SL Roadster',
'Blue Falcon',
'Tanooki Kart',
'B Dasher',
'Streetle',
'P-Wing',
'Koopa Clown',
'Standard Bike',
'Comet',
'Sport Bike',
'The Duke',
'Flame Rider',
'Varmint',
'Mr. Scooty',
'Jet Bike',
'Yoshi Bike',
'Master Cycle',
'Master Cycle Zero',
'City Tripper',
'Standard ATV',
'Wild Wiggler',
'Teddy Buggy',
'Bone Rattler',
'Splat Buggy',
'Inkstriker',
]
car_urls = [
'https://static.wikia.nocookie.net/mariokart/images/0/05/StandardKartBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20140715154926',
'https://static.wikia.nocookie.net/mariokart/images/d/d1/PipeFrameBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122932',
'https://static.wikia.nocookie.net/mariokart/images/d/df/Mach8BodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122956',
'https://static.wikia.nocookie.net/mariokart/images/9/94/Steel_Driver.png/revision/latest/scale-to-width-down/100?cb=20200925190921',
'https://static.wikia.nocookie.net/mariokart/images/f/f4/CatCruiserBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123132',
'https://static.wikia.nocookie.net/mariokart/images/6/6c/CircuitSpecialBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123237',
'https://static.wikia.nocookie.net/mariokart/images/5/56/TrispeederBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123217',
'https://static.wikia.nocookie.net/mariokart/images/c/c2/BadwagonBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123350',
'https://static.wikia.nocookie.net/mariokart/images/f/ff/PrancerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123333',
'https://static.wikia.nocookie.net/mariokart/images/4/45/BiddybuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123322',
'https://static.wikia.nocookie.net/mariokart/images/6/6d/LandshipBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123656',
'https://static.wikia.nocookie.net/mariokart/images/4/47/SneakerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123617',
'https://static.wikia.nocookie.net/mariokart/images/f/f8/SportsCoupeMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123625',
'https://static.wikia.nocookie.net/mariokart/images/3/31/MK8Gold_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102123637',
'https://static.wikia.nocookie.net/mariokart/images/c/c2/GLA-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140333',
'https://static.wikia.nocookie.net/mariokart/images/2/25/W25SilverArrow-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332',
'https://static.wikia.nocookie.net/mariokart/images/1/17/300SLRoadster-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332',
'https://static.wikia.nocookie.net/mariokart/images/e/ed/MK8_BlueFalcon.png/revision/latest/scale-to-width-down/100?cb=20150331235059',
'https://static.wikia.nocookie.net/mariokart/images/d/d7/MK8_TanookiBuggy.png/revision/latest/scale-to-width-down/100?cb=20150331235545',
'https://static.wikia.nocookie.net/mariokart/images/3/32/MK8_BDasher.png/revision/latest/scale-to-width-down/100?cb=20150401000836',
'https://static.wikia.nocookie.net/mariokart/images/c/cf/MK8Streetle.png/revision/latest/scale-to-width-down/100?cb=20150426174005',
'https://static.wikia.nocookie.net/mariokart/images/c/cd/MK8PWing.png/revision/latest/scale-to-width-down/100?cb=20150426174107',
'https://static.wikia.nocookie.net/mariokart/images/7/70/MK8DX_Koopa_Clown.png/revision/latest/scale-to-width-down/100?cb=20170704061052',
'https://static.wikia.nocookie.net/mariokart/images/8/84/StandardBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123849',
'https://static.wikia.nocookie.net/mariokart/images/0/0e/CometBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124024',
'https://static.wikia.nocookie.net/mariokart/images/f/fe/SportBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123857',
'https://static.wikia.nocookie.net/mariokart/images/8/8a/TheDukeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925174819',
'https://static.wikia.nocookie.net/mariokart/images/3/31/FlameRiderBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123942',
'https://static.wikia.nocookie.net/mariokart/images/d/d0/VarmintBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123951',
'https://static.wikia.nocookie.net/mariokart/images/1/18/MrScootyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123925',
'https://static.wikia.nocookie.net/mariokart/images/1/12/JetBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123928',
'https://static.wikia.nocookie.net/mariokart/images/6/62/YoshiBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925193256',
'https://static.wikia.nocookie.net/mariokart/images/5/52/MK8_MasterCycle.png/revision/latest/scale-to-width-down/100?cb=20150331231734',
'https://static.wikia.nocookie.net/mariokart/images/3/3e/150px-MK8D_Master_Cycle_Zero.png/revision/latest/scale-to-width-down/111?cb=20200726154936',
'https://static.wikia.nocookie.net/mariokart/images/9/90/MK8CityTripper.png/revision/latest/scale-to-width-down/100?cb=20150426175601',
'https://static.wikia.nocookie.net/mariokart/images/2/23/StandardATVBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124111',
'https://static.wikia.nocookie.net/mariokart/images/a/aa/WildWigglerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925175122',
'https://static.wikia.nocookie.net/mariokart/images/f/fa/TeddyBuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124120',
'https://static.wikia.nocookie.net/mariokart/images/0/0a/MK8BoneRattler.png/revision/latest/scale-to-width-down/100?cb=20150426180108',
'https://static.wikia.nocookie.net/mariokart/images/6/63/MK8DX_Splat_Buggy.png/revision/latest/scale-to-width-down/100?cb=20170706064814',
'https://static.wikia.nocookie.net/mariokart/images/e/eb/MK8DX_Inkstriker.png/revision/latest/scale-to-width-down/100?cb=20170706065507',
]
tire_names = [
'Standard',
'Monster',
'Roller',
'Slim',
'Slick',
'Metal',
'Button',
'Off-Road',
'Sponge',
'Wood',
'Cushion',
'Blue Standard',
'Hot Monster',
'Azure Roller',
'Crimson Slim',
'Cyber Slick',
'Retro Off-Road',
'Gold Tires',
'GLA Tires',
'Triforce Tires',
'Ancient Tyres',
'Leaf Tires',
]
tire_urls = [
'https://static.wikia.nocookie.net/mariokart/images/a/a8/StandardTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125545',
'https://static.wikia.nocookie.net/mariokart/images/2/29/MonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125541',
'https://static.wikia.nocookie.net/mariokart/images/7/76/RollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125539',
'https://static.wikia.nocookie.net/mariokart/images/f/f8/SlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125536',
'https://static.wikia.nocookie.net/mariokart/images/d/dd/SlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125542',
'https://static.wikia.nocookie.net/mariokart/images/9/96/MetalTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124533',
'https://static.wikia.nocookie.net/mariokart/images/0/07/ButtonTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124541',
'https://static.wikia.nocookie.net/mariokart/images/2/25/Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102124559',
'https://static.wikia.nocookie.net/mariokart/images/4/4c/SpongeTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124549',
'https://static.wikia.nocookie.net/mariokart/images/0/03/WoodTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124724',
'https://static.wikia.nocookie.net/mariokart/images/9/92/CushionTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124817',
'https://static.wikia.nocookie.net/mariokart/images/d/db/Blue_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102124836',
'https://static.wikia.nocookie.net/mariokart/images/d/d1/HotMonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124834',
'https://static.wikia.nocookie.net/mariokart/images/f/fe/AzureRollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20200726154338',
'https://static.wikia.nocookie.net/mariokart/images/7/71/CrimsonSlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125627',
'https://static.wikia.nocookie.net/mariokart/images/2/29/CyberSlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125626',
'https://static.wikia.nocookie.net/mariokart/images/4/48/Retro_Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102125629',
'https://static.wikia.nocookie.net/mariokart/images/5/52/Gold_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20141102125630',
'https://static.wikia.nocookie.net/mariokart/images/b/ba/GLATires-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180539',
'https://static.wikia.nocookie.net/mariokart/images/0/09/MK8_TriforceTires.png/revision/latest/scale-to-width-down/100?cb=20150331233357',
'https://static.wikia.nocookie.net/mariokart/images/d/d5/MK8D_Ancient_Tires.png/revision/latest/scale-to-width-down/100?cb=20200726154442',
'https://static.wikia.nocookie.net/mariokart/images/f/f9/Leaf_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180810',
]
glider_names = [
'Super Glider',
'Cloud Glider',
'Wario Wing',
'Waddle Wing',
'Peach Parasol',
'Parachute',
'Parafoil',
'Flower Glider',
'Bowser Kite',
'Plane Glider',
'MKTV Parafoil',
'Gold Glider',
'Hylian Kite',
'Paraglider',
'Paper Glider',
]
glider_urls = [
'https://static.wikia.nocookie.net/mariokart/images/a/a8/SuperGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125815',
'https://static.wikia.nocookie.net/mariokart/images/8/84/Cloud_Glider.png/revision/latest/scale-to-width-down/100?cb=20141102125838',
'https://static.wikia.nocookie.net/mariokart/images/a/ae/WarioWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125853',
'https://static.wikia.nocookie.net/mariokart/images/e/ef/WaddleWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125901',
'https://static.wikia.nocookie.net/mariokart/images/6/6e/PeachParasolGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125940',
'https://static.wikia.nocookie.net/mariokart/images/d/dd/ParachuteGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125823',
'https://static.wikia.nocookie.net/mariokart/images/c/c4/ParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125830',
'https://static.wikia.nocookie.net/mariokart/images/b/b3/FlowerGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125846',
'https://static.wikia.nocookie.net/mariokart/images/f/f7/BowserKiteMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125909',
'https://static.wikia.nocookie.net/mariokart/images/c/ca/PlaneGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125930',
'https://static.wikia.nocookie.net/mariokart/images/9/96/MKTVParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125947',
'https://static.wikia.nocookie.net/mariokart/images/1/18/GoldGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125956',
'https://static.wikia.nocookie.net/mariokart/images/6/62/MK8_HylianKite.png/revision/latest/scale-to-width-down/100?cb=20150331232731',
'https://static.wikia.nocookie.net/mariokart/images/3/39/MK8D_Paraglider.png/revision/latest/scale-to-width-down/117?cb=20200726155246',
'https://static.wikia.nocookie.net/mariokart/images/0/0e/PaperGliderIcon-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426181313',
]
# Characters: the alternate "Yoshi (...)" and "Shyguy (...)" colours and the
# "(G)" girl versions get index=None instead of a running number (presumably so
# the app treats them as skins of one roster slot); all other characters get a
# 1-based index that skips those variants.
visible = 0
for name, url in zip(char_names, char_urls):
    if "Yoshi (" in name or "Shyguy (" in name or "(G)" in name:
        index = None
    else:
        visible += 1
        index = visible
    Character(name=name, image_url=url, index=index).save()

# Tires, vehicle bodies and gliders just get a simple 1-based index.
for index, (name, url) in enumerate(zip(tire_names, tire_urls), start=1):
    Tire(name=name, image_url=url, index=index).save()

for index, (name, url) in enumerate(zip(car_names, car_urls), start=1):
    Vehicle(name=name, image_url=url, index=index).save()

for index, (name, url) in enumerate(zip(glider_names, glider_urls), start=1):
    Glider(name=name, image_url=url, index=index).save()
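# Intended to be run once against an empty database (after migrations): every
# .save() here inserts a new row, so re-running the script would duplicate the
# whole catalogue.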
|
normal
|
{
"blob_id": "dbda5df7dff3f8acc320ffe7b9c7c279ebed2cc2",
"index": 7108,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkrandom.settings')\n<mask token>\ndjango.setup()\n<mask token>\nfor char in char_names:\n index = x - y + 1\n name = char_names[x]\n if 'Yoshi (' in name or 'Shyguy (' in name or '(G)' in name:\n y += 1\n index = None\n new_char = Character(name=char_names[x], image_url=char_urls[x], index=\n index)\n new_char.save()\n x += 1\n<mask token>\nfor tire in tire_names:\n index = x + 1\n new_tire = Tire(name=tire_names[x], image_url=tire_urls[x], index=index)\n new_tire.save()\n x += 1\n<mask token>\nfor car in car_names:\n index = x + 1\n new_car = Vehicle(name=car_names[x], image_url=car_urls[x], index=index)\n new_car.save()\n x += 1\n<mask token>\nfor glider in glider_names:\n index = x + 1\n new_glider = Glider(name=glider_names[x], image_url=glider_urls[x],\n index=index)\n new_glider.save()\n x += 1\n",
"step-3": "<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkrandom.settings')\n<mask token>\ndjango.setup()\n<mask token>\nchar_names = ['Mario', 'Luigi', 'Peach', 'Daisy', 'Rosalina',\n 'Mario Tanooki', 'Peach cat', 'Yoshi', 'Yoshi (LBlue)', 'Yoshi (Black)',\n 'Yoshi (Rose)', 'Yoshi (Yellow)', 'Yoshi (White)', 'Yoshi (Blue)',\n 'Yoshi (Rose)', 'Yoshi (Orange)', 'Toad', 'Koopa', 'Shyguy',\n 'Shyguy (LB)', 'Shyguy (Black)', 'Shyguy (Rose)', 'Shyguy (Yellow)',\n 'Shyguy (White)', 'Shyguy (Blue)', 'Shyguy (Rose)', 'Shyguy (Orange)',\n 'Lakitu', 'Toadette', 'Boo', 'Baby Mario', 'Baby Luigi', 'Baby Peach',\n 'Baby Daisy', 'Baby Rosalina', 'Metal Mario', 'Golden Mario',\n 'Golden Peach', 'Wario', 'Waluigi', 'Donkey Kong', 'Bowser', 'Skelerex',\n 'Bowser Jr', 'Dry Bowser', 'Lemmy', 'Larry', 'Wendy', 'Ludwig', 'Iggy',\n 'Roy', 'Morton', 'Inkling (G)', 'Inkling (B)', 'Link (SSBU)',\n 'Link (BOTW)', 'Villager (B)', 'Villager(G)', 'Mary']\nchar_urls = [\n 'https://static.wikia.nocookie.net/heros/images/9/94/Mario_and_Sonic_Tokyo_2020_Mario_artwork.png/revision/latest?cb=20210410003745&path-prefix=fr'\n , 'https://freepngimg.com/thumb/categories/462.png',\n 'https://static.wikia.nocookie.net/smashbros/images/0/06/Peach_SMP.png/revision/latest?cb=20190420130956&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/6/6c/Artwork_Daisy_MP10.png/revision/latest?cb=20171021130941&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/1/17/Harmonie_The_Top_100.png/revision/latest?cb=20171021123917&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/3/33/Mario_tanuki_-_SM3DL.png/revision/latest/scale-to-width-down/250?cb=20190409114830&path-prefix=fr'\n ,\n 'https://i.pinimg.com/originals/7d/5d/d8/7d5dd803a6eaad9e7491ed59f184eb39.png'\n ,\n 'https://www.seekpng.com/png/full/15-156558_ground-pound-yoshi-super-mario-yoshi-png.png'\n ,\n 'https://static.wikia.nocookie.net/hello-yoshi/images/f/fb/ACL_MK8_Light_Blue_Yoshi.png/revision/latest?cb=20180325192809'\n , 'https://www.123-stickers.com/5731-6069-large/Array.jpg',\n 'https://static.wikia.nocookie.net/supermariorun/images/3/32/Yoshi_rouge.PNG/revision/latest?cb=20190427132857&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/supermariorun/images/9/94/Yoshi_jaune.PNG/revision/latest?cb=20190427132253&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/yoshi/images/b/b9/Yoshi_blanc.png/revision/latest?cb=20181128092526&path-prefix=fr'\n ,\n 'https://mario.wiki.gallery/images/thumb/9/9a/MKT_Artwork_BlueYoshi.png/129px-MKT_Artwork_BlueYoshi.png'\n ,\n 'https://e7.pngegg.com/pngimages/860/699/png-clipart-mario-yoshi-yoshi-s-story-super-mario-world-2-yoshi-s-island-yoshi-s-woolly-world-yoshi-s-new-island-yoshi-nintendo-computer-wallpaper.png'\n ,\n 'https://static.wikia.nocookie.net/yoshi/images/a/a4/Orange-yoshi-yoshi-29007923-415-479.png/revision/latest?cb=20201026191941&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/e/e4/SMRToad.png/revision/latest?cb=20161123170829&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/smashbros/images/e/ed/Art_Koopa_NSMB.png/revision/latest?cb=20131223214127&path-prefix=fr'\n ,\n 
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/d585815f-9fc0-440f-9949-a4a9c06bb713/db7whvu-94fc7f0d-1dea-47aa-922d-428a26ed8480.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcL2Q1ODU4MTVmLTlmYzAtNDQwZi05OTQ5LWE0YTljMDZiYjcxM1wvZGI3d2h2dS05NGZjN2YwZC0xZGVhLTQ3YWEtOTIyZC00MjhhMjZlZDg0ODAucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.iNMsbFuXa43xVer7q_c2UB65P2wAVONONt-wrMHozjo'\n ,\n 'https://i.pinimg.com/originals/58/69/c3/5869c3396ea69ca97c76f0b725099aa9.png'\n ,\n 'https://static.wikia.nocookie.net/supermarioexploration/images/8/8e/18B83E32-0819-4994-A3F8-E90CC35AB8AC.png/revision/latest/scale-to-width-down/872?cb=20180607214102'\n ,\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dw0-1d608b14-5aba-43f7-b4a8-e855207824c1.png/v1/fill/w_600,h_815,strp/super_mario__green_shy_guy_2d_by_joshuat1306_dcz4dw0-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHcwLTFkNjA4YjE0LTVhYmEtNDNmNy1iNGE4LWU4NTUyMDc4MjRjMS5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.RxuED4zTRqJT-3TAQ8iHGS6zpoDw4O4DIKFQ8cKWpSM'\n ,\n 'https://static.miraheze.org/drmarioworldwiki/thumb/9/9a/Cha_sub_shyguyYellow.png/144px-Cha_sub_shyguyYellow.png'\n ,\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz564x-7c505016-32d8-4268-b44e-358edcb1b10d.png/v1/fill/w_600,h_815,strp/super_mario__white_shy_guy_2d_by_joshuat1306_dcz564x-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o1NjR4LTdjNTA1MDE2LTMyZDgtNDI2OC1iNDRlLTM1OGVkY2IxYjEwZC5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.gLfujNRPJ5nNiOq-siQUD6ifo28x0oQHEB4PrpNHqFk'\n ,\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dqq-95483c93-ee74-4ca0-a820-3287359457a3.png/v1/fill/w_600,h_815,strp/super_mario__blue_shy_guy_2d_by_joshuat1306_dcz4dqq-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHFxLTk1NDgzYzkzLWVlNzQtNGNhMC1hODIwLTMyODczNTk0NTdhMy5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.w1w6wZOiQ0oxfwNTiiuFy2Ph6yO6mN99-U_HYKZdZyQ'\n ,\n 'https://static.wikia.nocookie.net/paper-shin-aka-keroro-gunsou/images/f/f0/Pink_Shy_Guy_dance.png/revision/latest/scale-to-width-down/250?cb=20210525165708'\n ,\n 'https://static.wikia.nocookie.net/fantendo/images/f/ff/ShyGuyn_s._Png/revision/latest/scale-to-width-down/250?cb=20121222235649'\n ,\n 'https://static.wikia.nocookie.net/fantendo/images/e/eb/Cloudless_Lakitu.png/revision/latest/scale-to-width-down/250?cb=20120809192910'\n ,\n 
'https://static.wikia.nocookie.net/mario/images/b/b2/ToadetteMP10.png/revision/latest?cb=20190609122040&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/a/a1/Boo_CTTT.png/revision/latest?cb=20210504081014'\n ,\n 'https://static.wikia.nocookie.net/videogames-fanon/images/d/d9/BabySit.png/revision/latest?cb=20120930205222'\n ,\n 'https://i.pinimg.com/originals/c8/4d/1f/c84d1f11741ee80b7bbda79a449917ab.png'\n ,\n 'https://www.pngkit.com/png/full/436-4365611_download-zip-archive-baby-peach-mario-bros.png'\n ,\n 'https://static.wikia.nocookie.net/fantendo/images/b/be/Baby_Daisy.png/revision/latest?cb=20210119015117'\n , 'https://mario.wiki.gallery/images/3/33/MKT_Artwork_BabyRosalina.png',\n 'https://static.wikia.nocookie.net/mario/images/7/7e/Metal_Mario_Artwork_2_-_Mario_Kart_7.png/revision/latest?cb=20120513171323'\n ,\n 'https://static.wikia.nocookie.net/mario/images/1/10/MGWT_Gold_Mario.png/revision/latest?cb=20190317040405'\n ,\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/0e738c17-7f3c-422e-8225-f8c782b08626/deg7wos-27ff3182-82ba-43ab-b5c0-f05cbec329f2.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcLzBlNzM4YzE3LTdmM2MtNDIyZS04MjI1LWY4Yzc4MmIwODYyNlwvZGVnN3dvcy0yN2ZmMzE4Mi04MmJhLTQzYWItYjVjMC1mMDVjYmVjMzI5ZjIucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.bK3J5_NJrKn-JHsqIxEUCjBiXqM4dMnBho-b2lJ6sK8'\n , 'https://www.smashbros.com/assets_v2/img/fighter/wario/main2.png',\n 'https://static.wikia.nocookie.net/wario/images/8/8a/Waluigi%28SMP%290.png/revision/latest?cb=20180929091141'\n ,\n 'https://static.wikia.nocookie.net/heroes-fr/images/5/5c/Donkey_Kong.png/revision/latest?cb=20201122110342&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/epicpixelbattles/images/0/0b/Bowser-png-clipart-removebg-preview.png/revision/latest?cb=20201013093525'\n ,\n 'https://static.wikia.nocookie.net/mario/images/1/12/MPSRSkelerex.png/revision/latest/scale-to-width-down/2000?cb=20161015183419&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/0/07/Art_Bowser_Jr_SPM.png/revision/latest?cb=20181112222531&path-prefix=fr'\n ,\n 'https://mario.wiki.gallery/images/thumb/9/9d/Dry_Bowser_Artwork.png/250px-Dry_Bowser_Artwork.png'\n ,\n 'https://www.pngkey.com/png/full/563-5634904_super-mario-odyssey-lemmy-mario-kart-8-deluxe.png'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/42/LarryKoopa.png/revision/latest?cb=20140313170129'\n ,\n 'https://mario.wiki.gallery/images/thumb/9/95/NSMBW_Wendy_Artwork.png/1200px-NSMBW_Wendy_Artwork.png'\n ,\n 'https://static.wikia.nocookie.net/mario-fr/images/f/f6/1-1571859148.png/revision/latest?cb=20191023193229&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/4/4c/Iggy_NSMBU.png/revision/latest?cb=20171208215237&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario-fr/images/f/fb/2.png/revision/latest?cb=20191023191713&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/fantendo/images/4/4f/Morton_Koopa_Jr_3D.png/revision/latest?cb=20110403192112'\n ,\n 'https://static.wikia.nocookie.net/mario/images/2/2e/Inkling_SSBU.png/revision/latest?cb=20200216081405'\n ,\n 'https://i.pinimg.com/originals/7c/ce/f8/7ccef872fcee2e11945c6799ce2985cc.png'\n ,\n 'https://www.seekpng.com/png/full/7-73001_link-zelda-png-super-smash-bros-for-wii.png'\n ,\n 
'https://static.wikia.nocookie.net/versus-compendium/images/0/00/Link_BotW.png/revision/latest?cb=20181128185543'\n ,\n 'https://static.wikia.nocookie.net/nintendo/images/1/1d/Villager-Boy-1.png/revision/latest?cb=20150419125930&path-prefix=en'\n ,\n 'https://i.pinimg.com/originals/bb/ca/f7/bbcaf749d9dc2d1b1259e8fe5cb49769.png'\n ,\n 'https://static.wikia.nocookie.net/nintendo-univers/images/a/a9/Marie_ACAF_3.png/revision/latest?cb=20161221163100&path-prefix=fr'\n ]\ncar_names = ['Standard Kart', 'Pipe Frame', 'Mach 8', 'Steel Driver',\n 'Cat Cruiser', 'Circuit Special', 'Tri-Speeder', 'Badwagon', 'Prancer',\n 'Biddybuggy', 'Landship', 'Sneeker', 'Sports Coupe', 'Gold Standard',\n 'GLA', 'W 25 Silver Arrow', '300 SL Roadster', 'Blue Falcon',\n 'Tanooki Kart', 'B Dasher', 'Streetle', 'P-Wing', 'Koopa Clown',\n 'Standard Bike', 'Comet', 'Sport Bike', 'The Duke', 'Flame Rider',\n 'Varmint', 'Mr. Scooty', 'Jet Bike', 'Yoshi Bike', 'Master Cycle',\n 'Master Cycle Zero', 'City Tripper', 'Standard ATV', 'Wild Wiggler',\n 'Teddy Buggy', 'Bone Rattler', 'Splat Buggy', 'Inkstriker']\ncar_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/0/05/StandardKartBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20140715154926'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d1/PipeFrameBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122932'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/df/Mach8BodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122956'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/94/Steel_Driver.png/revision/latest/scale-to-width-down/100?cb=20200925190921'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f4/CatCruiserBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123132'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/6c/CircuitSpecialBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123237'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/5/56/TrispeederBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123217'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/c2/BadwagonBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123350'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/ff/PrancerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123333'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/45/BiddybuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123322'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/6d/LandshipBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123656'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/47/SneakerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123617'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f8/SportsCoupeMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123625'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/31/MK8Gold_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102123637'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/c2/GLA-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140333'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/25/W25SilverArrow-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/1/17/300SLRoadster-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332'\n ,\n 
'https://static.wikia.nocookie.net/mariokart/images/e/ed/MK8_BlueFalcon.png/revision/latest/scale-to-width-down/100?cb=20150331235059'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d7/MK8_TanookiBuggy.png/revision/latest/scale-to-width-down/100?cb=20150331235545'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/32/MK8_BDasher.png/revision/latest/scale-to-width-down/100?cb=20150401000836'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/cf/MK8Streetle.png/revision/latest/scale-to-width-down/100?cb=20150426174005'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/cd/MK8PWing.png/revision/latest/scale-to-width-down/100?cb=20150426174107'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/7/70/MK8DX_Koopa_Clown.png/revision/latest/scale-to-width-down/100?cb=20170704061052'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/StandardBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123849'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/CometBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124024'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/SportBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123857'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/8/8a/TheDukeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925174819'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/31/FlameRiderBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123942'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d0/VarmintBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123951'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/MrScootyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123925'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/1/12/JetBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123928'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/YoshiBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925193256'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/MK8_MasterCycle.png/revision/latest/scale-to-width-down/100?cb=20150331231734'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/3e/150px-MK8D_Master_Cycle_Zero.png/revision/latest/scale-to-width-down/111?cb=20200726154936'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/90/MK8CityTripper.png/revision/latest/scale-to-width-down/100?cb=20150426175601'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/23/StandardATVBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124111'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/a/aa/WildWigglerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925175122'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/fa/TeddyBuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124120'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/0a/MK8BoneRattler.png/revision/latest/scale-to-width-down/100?cb=20150426180108'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/63/MK8DX_Splat_Buggy.png/revision/latest/scale-to-width-down/100?cb=20170706064814'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/e/eb/MK8DX_Inkstriker.png/revision/latest/scale-to-width-down/100?cb=20170706065507'\n ]\ntire_names = ['Standard', 'Monster', 'Roller', 'Slim', 'Slick', 'Metal',\n 'Button', 'Off-Road', 'Sponge', 'Wood', 'Cushion', 'Blue 
Standard',\n 'Hot Monster', 'Azure Roller', 'Crimson Slim', 'Cyber Slick',\n 'Retro Off-Road', 'Gold Tires', 'GLA Tires', 'Triforce Tires',\n 'Ancient Tyres', 'Leaf Tires']\ntire_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/a/a8/StandardTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125545'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/MonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125541'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/7/76/RollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125539'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f8/SlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125536'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/SlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125542'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MetalTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124533'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/07/ButtonTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124541'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/25/Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102124559'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/4c/SpongeTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124549'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/03/WoodTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124724'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/92/CushionTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124817'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/db/Blue_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102124836'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d1/HotMonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124834'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/AzureRollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20200726154338'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/7/71/CrimsonSlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125627'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/CyberSlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125626'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/48/Retro_Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102125629'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/Gold_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20141102125630'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/b/ba/GLATires-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180539'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/09/MK8_TriforceTires.png/revision/latest/scale-to-width-down/100?cb=20150331233357'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d5/MK8D_Ancient_Tires.png/revision/latest/scale-to-width-down/100?cb=20200726154442'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f9/Leaf_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180810'\n ]\nglider_names = ['Super Glider', 'Cloud Glider', 'Wario Wing', 'Waddle Wing',\n 'Peach Parasol', 'Parachute', 'Parafoil', 'Flower Glider',\n 'Bowser Kite', 'Plane Glider', 'MKTV Parafoil', 'Gold Glider',\n 'Hylian Kite', 'Paraglider', 'Paper 
Glider']\nglider_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/a/a8/SuperGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125815'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/Cloud_Glider.png/revision/latest/scale-to-width-down/100?cb=20141102125838'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/a/ae/WarioWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125853'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/e/ef/WaddleWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125901'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/6e/PeachParasolGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125940'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/ParachuteGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125823'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/c4/ParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125830'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/b/b3/FlowerGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125846'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f7/BowserKiteMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125909'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/ca/PlaneGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125930'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MKTVParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125947'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/GoldGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125956'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/MK8_HylianKite.png/revision/latest/scale-to-width-down/100?cb=20150331232731'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/39/MK8D_Paraglider.png/revision/latest/scale-to-width-down/117?cb=20200726155246'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/PaperGliderIcon-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426181313'\n ]\nx = 0\ny = 0\nfor char in char_names:\n index = x - y + 1\n name = char_names[x]\n if 'Yoshi (' in name or 'Shyguy (' in name or '(G)' in name:\n y += 1\n index = None\n new_char = Character(name=char_names[x], image_url=char_urls[x], index=\n index)\n new_char.save()\n x += 1\nx = 0\nfor tire in tire_names:\n index = x + 1\n new_tire = Tire(name=tire_names[x], image_url=tire_urls[x], index=index)\n new_tire.save()\n x += 1\nx = 0\nfor car in car_names:\n index = x + 1\n new_car = Vehicle(name=car_names[x], image_url=car_urls[x], index=index)\n new_car.save()\n x += 1\nx = 0\nfor glider in glider_names:\n index = x + 1\n new_glider = Glider(name=glider_names[x], image_url=glider_urls[x],\n index=index)\n new_glider.save()\n x += 1\n",
"step-4": "import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mkrandom.settings')\nimport django\ndjango.setup()\nfrom main.models import Character, Vehicle, Tire, Glider\nchar_names = ['Mario', 'Luigi', 'Peach', 'Daisy', 'Rosalina',\n 'Mario Tanooki', 'Peach cat', 'Yoshi', 'Yoshi (LBlue)', 'Yoshi (Black)',\n 'Yoshi (Rose)', 'Yoshi (Yellow)', 'Yoshi (White)', 'Yoshi (Blue)',\n 'Yoshi (Rose)', 'Yoshi (Orange)', 'Toad', 'Koopa', 'Shyguy',\n 'Shyguy (LB)', 'Shyguy (Black)', 'Shyguy (Rose)', 'Shyguy (Yellow)',\n 'Shyguy (White)', 'Shyguy (Blue)', 'Shyguy (Rose)', 'Shyguy (Orange)',\n 'Lakitu', 'Toadette', 'Boo', 'Baby Mario', 'Baby Luigi', 'Baby Peach',\n 'Baby Daisy', 'Baby Rosalina', 'Metal Mario', 'Golden Mario',\n 'Golden Peach', 'Wario', 'Waluigi', 'Donkey Kong', 'Bowser', 'Skelerex',\n 'Bowser Jr', 'Dry Bowser', 'Lemmy', 'Larry', 'Wendy', 'Ludwig', 'Iggy',\n 'Roy', 'Morton', 'Inkling (G)', 'Inkling (B)', 'Link (SSBU)',\n 'Link (BOTW)', 'Villager (B)', 'Villager(G)', 'Mary']\nchar_urls = [\n 'https://static.wikia.nocookie.net/heros/images/9/94/Mario_and_Sonic_Tokyo_2020_Mario_artwork.png/revision/latest?cb=20210410003745&path-prefix=fr'\n , 'https://freepngimg.com/thumb/categories/462.png',\n 'https://static.wikia.nocookie.net/smashbros/images/0/06/Peach_SMP.png/revision/latest?cb=20190420130956&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/6/6c/Artwork_Daisy_MP10.png/revision/latest?cb=20171021130941&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/1/17/Harmonie_The_Top_100.png/revision/latest?cb=20171021123917&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/3/33/Mario_tanuki_-_SM3DL.png/revision/latest/scale-to-width-down/250?cb=20190409114830&path-prefix=fr'\n ,\n 'https://i.pinimg.com/originals/7d/5d/d8/7d5dd803a6eaad9e7491ed59f184eb39.png'\n ,\n 'https://www.seekpng.com/png/full/15-156558_ground-pound-yoshi-super-mario-yoshi-png.png'\n ,\n 'https://static.wikia.nocookie.net/hello-yoshi/images/f/fb/ACL_MK8_Light_Blue_Yoshi.png/revision/latest?cb=20180325192809'\n , 'https://www.123-stickers.com/5731-6069-large/Array.jpg',\n 'https://static.wikia.nocookie.net/supermariorun/images/3/32/Yoshi_rouge.PNG/revision/latest?cb=20190427132857&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/supermariorun/images/9/94/Yoshi_jaune.PNG/revision/latest?cb=20190427132253&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/yoshi/images/b/b9/Yoshi_blanc.png/revision/latest?cb=20181128092526&path-prefix=fr'\n ,\n 'https://mario.wiki.gallery/images/thumb/9/9a/MKT_Artwork_BlueYoshi.png/129px-MKT_Artwork_BlueYoshi.png'\n ,\n 'https://e7.pngegg.com/pngimages/860/699/png-clipart-mario-yoshi-yoshi-s-story-super-mario-world-2-yoshi-s-island-yoshi-s-woolly-world-yoshi-s-new-island-yoshi-nintendo-computer-wallpaper.png'\n ,\n 'https://static.wikia.nocookie.net/yoshi/images/a/a4/Orange-yoshi-yoshi-29007923-415-479.png/revision/latest?cb=20201026191941&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/e/e4/SMRToad.png/revision/latest?cb=20161123170829&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/smashbros/images/e/ed/Art_Koopa_NSMB.png/revision/latest?cb=20131223214127&path-prefix=fr'\n ,\n 
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/d585815f-9fc0-440f-9949-a4a9c06bb713/db7whvu-94fc7f0d-1dea-47aa-922d-428a26ed8480.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcL2Q1ODU4MTVmLTlmYzAtNDQwZi05OTQ5LWE0YTljMDZiYjcxM1wvZGI3d2h2dS05NGZjN2YwZC0xZGVhLTQ3YWEtOTIyZC00MjhhMjZlZDg0ODAucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.iNMsbFuXa43xVer7q_c2UB65P2wAVONONt-wrMHozjo'\n ,\n 'https://i.pinimg.com/originals/58/69/c3/5869c3396ea69ca97c76f0b725099aa9.png'\n ,\n 'https://static.wikia.nocookie.net/supermarioexploration/images/8/8e/18B83E32-0819-4994-A3F8-E90CC35AB8AC.png/revision/latest/scale-to-width-down/872?cb=20180607214102'\n ,\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dw0-1d608b14-5aba-43f7-b4a8-e855207824c1.png/v1/fill/w_600,h_815,strp/super_mario__green_shy_guy_2d_by_joshuat1306_dcz4dw0-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHcwLTFkNjA4YjE0LTVhYmEtNDNmNy1iNGE4LWU4NTUyMDc4MjRjMS5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.RxuED4zTRqJT-3TAQ8iHGS6zpoDw4O4DIKFQ8cKWpSM'\n ,\n 'https://static.miraheze.org/drmarioworldwiki/thumb/9/9a/Cha_sub_shyguyYellow.png/144px-Cha_sub_shyguyYellow.png'\n ,\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz564x-7c505016-32d8-4268-b44e-358edcb1b10d.png/v1/fill/w_600,h_815,strp/super_mario__white_shy_guy_2d_by_joshuat1306_dcz564x-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o1NjR4LTdjNTA1MDE2LTMyZDgtNDI2OC1iNDRlLTM1OGVkY2IxYjEwZC5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.gLfujNRPJ5nNiOq-siQUD6ifo28x0oQHEB4PrpNHqFk'\n ,\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dqq-95483c93-ee74-4ca0-a820-3287359457a3.png/v1/fill/w_600,h_815,strp/super_mario__blue_shy_guy_2d_by_joshuat1306_dcz4dqq-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHFxLTk1NDgzYzkzLWVlNzQtNGNhMC1hODIwLTMyODczNTk0NTdhMy5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.w1w6wZOiQ0oxfwNTiiuFy2Ph6yO6mN99-U_HYKZdZyQ'\n ,\n 'https://static.wikia.nocookie.net/paper-shin-aka-keroro-gunsou/images/f/f0/Pink_Shy_Guy_dance.png/revision/latest/scale-to-width-down/250?cb=20210525165708'\n ,\n 'https://static.wikia.nocookie.net/fantendo/images/f/ff/ShyGuyn_s._Png/revision/latest/scale-to-width-down/250?cb=20121222235649'\n ,\n 'https://static.wikia.nocookie.net/fantendo/images/e/eb/Cloudless_Lakitu.png/revision/latest/scale-to-width-down/250?cb=20120809192910'\n ,\n 
'https://static.wikia.nocookie.net/mario/images/b/b2/ToadetteMP10.png/revision/latest?cb=20190609122040&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/a/a1/Boo_CTTT.png/revision/latest?cb=20210504081014'\n ,\n 'https://static.wikia.nocookie.net/videogames-fanon/images/d/d9/BabySit.png/revision/latest?cb=20120930205222'\n ,\n 'https://i.pinimg.com/originals/c8/4d/1f/c84d1f11741ee80b7bbda79a449917ab.png'\n ,\n 'https://www.pngkit.com/png/full/436-4365611_download-zip-archive-baby-peach-mario-bros.png'\n ,\n 'https://static.wikia.nocookie.net/fantendo/images/b/be/Baby_Daisy.png/revision/latest?cb=20210119015117'\n , 'https://mario.wiki.gallery/images/3/33/MKT_Artwork_BabyRosalina.png',\n 'https://static.wikia.nocookie.net/mario/images/7/7e/Metal_Mario_Artwork_2_-_Mario_Kart_7.png/revision/latest?cb=20120513171323'\n ,\n 'https://static.wikia.nocookie.net/mario/images/1/10/MGWT_Gold_Mario.png/revision/latest?cb=20190317040405'\n ,\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/0e738c17-7f3c-422e-8225-f8c782b08626/deg7wos-27ff3182-82ba-43ab-b5c0-f05cbec329f2.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcLzBlNzM4YzE3LTdmM2MtNDIyZS04MjI1LWY4Yzc4MmIwODYyNlwvZGVnN3dvcy0yN2ZmMzE4Mi04MmJhLTQzYWItYjVjMC1mMDVjYmVjMzI5ZjIucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.bK3J5_NJrKn-JHsqIxEUCjBiXqM4dMnBho-b2lJ6sK8'\n , 'https://www.smashbros.com/assets_v2/img/fighter/wario/main2.png',\n 'https://static.wikia.nocookie.net/wario/images/8/8a/Waluigi%28SMP%290.png/revision/latest?cb=20180929091141'\n ,\n 'https://static.wikia.nocookie.net/heroes-fr/images/5/5c/Donkey_Kong.png/revision/latest?cb=20201122110342&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/epicpixelbattles/images/0/0b/Bowser-png-clipart-removebg-preview.png/revision/latest?cb=20201013093525'\n ,\n 'https://static.wikia.nocookie.net/mario/images/1/12/MPSRSkelerex.png/revision/latest/scale-to-width-down/2000?cb=20161015183419&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/0/07/Art_Bowser_Jr_SPM.png/revision/latest?cb=20181112222531&path-prefix=fr'\n ,\n 'https://mario.wiki.gallery/images/thumb/9/9d/Dry_Bowser_Artwork.png/250px-Dry_Bowser_Artwork.png'\n ,\n 'https://www.pngkey.com/png/full/563-5634904_super-mario-odyssey-lemmy-mario-kart-8-deluxe.png'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/42/LarryKoopa.png/revision/latest?cb=20140313170129'\n ,\n 'https://mario.wiki.gallery/images/thumb/9/95/NSMBW_Wendy_Artwork.png/1200px-NSMBW_Wendy_Artwork.png'\n ,\n 'https://static.wikia.nocookie.net/mario-fr/images/f/f6/1-1571859148.png/revision/latest?cb=20191023193229&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario/images/4/4c/Iggy_NSMBU.png/revision/latest?cb=20171208215237&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/mario-fr/images/f/fb/2.png/revision/latest?cb=20191023191713&path-prefix=fr'\n ,\n 'https://static.wikia.nocookie.net/fantendo/images/4/4f/Morton_Koopa_Jr_3D.png/revision/latest?cb=20110403192112'\n ,\n 'https://static.wikia.nocookie.net/mario/images/2/2e/Inkling_SSBU.png/revision/latest?cb=20200216081405'\n ,\n 'https://i.pinimg.com/originals/7c/ce/f8/7ccef872fcee2e11945c6799ce2985cc.png'\n ,\n 'https://www.seekpng.com/png/full/7-73001_link-zelda-png-super-smash-bros-for-wii.png'\n ,\n 
'https://static.wikia.nocookie.net/versus-compendium/images/0/00/Link_BotW.png/revision/latest?cb=20181128185543'\n ,\n 'https://static.wikia.nocookie.net/nintendo/images/1/1d/Villager-Boy-1.png/revision/latest?cb=20150419125930&path-prefix=en'\n ,\n 'https://i.pinimg.com/originals/bb/ca/f7/bbcaf749d9dc2d1b1259e8fe5cb49769.png'\n ,\n 'https://static.wikia.nocookie.net/nintendo-univers/images/a/a9/Marie_ACAF_3.png/revision/latest?cb=20161221163100&path-prefix=fr'\n ]\ncar_names = ['Standard Kart', 'Pipe Frame', 'Mach 8', 'Steel Driver',\n 'Cat Cruiser', 'Circuit Special', 'Tri-Speeder', 'Badwagon', 'Prancer',\n 'Biddybuggy', 'Landship', 'Sneeker', 'Sports Coupe', 'Gold Standard',\n 'GLA', 'W 25 Silver Arrow', '300 SL Roadster', 'Blue Falcon',\n 'Tanooki Kart', 'B Dasher', 'Streetle', 'P-Wing', 'Koopa Clown',\n 'Standard Bike', 'Comet', 'Sport Bike', 'The Duke', 'Flame Rider',\n 'Varmint', 'Mr. Scooty', 'Jet Bike', 'Yoshi Bike', 'Master Cycle',\n 'Master Cycle Zero', 'City Tripper', 'Standard ATV', 'Wild Wiggler',\n 'Teddy Buggy', 'Bone Rattler', 'Splat Buggy', 'Inkstriker']\ncar_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/0/05/StandardKartBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20140715154926'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d1/PipeFrameBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122932'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/df/Mach8BodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122956'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/94/Steel_Driver.png/revision/latest/scale-to-width-down/100?cb=20200925190921'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f4/CatCruiserBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123132'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/6c/CircuitSpecialBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123237'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/5/56/TrispeederBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123217'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/c2/BadwagonBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123350'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/ff/PrancerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123333'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/45/BiddybuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123322'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/6d/LandshipBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123656'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/47/SneakerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123617'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f8/SportsCoupeMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123625'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/31/MK8Gold_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102123637'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/c2/GLA-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140333'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/25/W25SilverArrow-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/1/17/300SLRoadster-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332'\n ,\n 
'https://static.wikia.nocookie.net/mariokart/images/e/ed/MK8_BlueFalcon.png/revision/latest/scale-to-width-down/100?cb=20150331235059'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d7/MK8_TanookiBuggy.png/revision/latest/scale-to-width-down/100?cb=20150331235545'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/32/MK8_BDasher.png/revision/latest/scale-to-width-down/100?cb=20150401000836'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/cf/MK8Streetle.png/revision/latest/scale-to-width-down/100?cb=20150426174005'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/cd/MK8PWing.png/revision/latest/scale-to-width-down/100?cb=20150426174107'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/7/70/MK8DX_Koopa_Clown.png/revision/latest/scale-to-width-down/100?cb=20170704061052'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/StandardBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123849'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/CometBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124024'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/SportBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123857'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/8/8a/TheDukeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925174819'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/31/FlameRiderBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123942'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d0/VarmintBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123951'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/MrScootyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123925'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/1/12/JetBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123928'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/YoshiBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925193256'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/MK8_MasterCycle.png/revision/latest/scale-to-width-down/100?cb=20150331231734'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/3e/150px-MK8D_Master_Cycle_Zero.png/revision/latest/scale-to-width-down/111?cb=20200726154936'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/90/MK8CityTripper.png/revision/latest/scale-to-width-down/100?cb=20150426175601'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/23/StandardATVBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124111'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/a/aa/WildWigglerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925175122'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/fa/TeddyBuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124120'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/0a/MK8BoneRattler.png/revision/latest/scale-to-width-down/100?cb=20150426180108'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/63/MK8DX_Splat_Buggy.png/revision/latest/scale-to-width-down/100?cb=20170706064814'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/e/eb/MK8DX_Inkstriker.png/revision/latest/scale-to-width-down/100?cb=20170706065507'\n ]\ntire_names = ['Standard', 'Monster', 'Roller', 'Slim', 'Slick', 'Metal',\n 'Button', 'Off-Road', 'Sponge', 'Wood', 'Cushion', 'Blue 
Standard',\n 'Hot Monster', 'Azure Roller', 'Crimson Slim', 'Cyber Slick',\n 'Retro Off-Road', 'Gold Tires', 'GLA Tires', 'Triforce Tires',\n 'Ancient Tyres', 'Leaf Tires']\ntire_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/a/a8/StandardTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125545'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/MonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125541'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/7/76/RollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125539'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f8/SlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125536'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/SlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125542'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MetalTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124533'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/07/ButtonTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124541'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/25/Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102124559'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/4c/SpongeTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124549'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/03/WoodTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124724'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/92/CushionTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124817'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/db/Blue_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102124836'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d1/HotMonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124834'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/AzureRollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20200726154338'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/7/71/CrimsonSlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125627'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/CyberSlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125626'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/4/48/Retro_Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102125629'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/Gold_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20141102125630'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/b/ba/GLATires-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180539'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/09/MK8_TriforceTires.png/revision/latest/scale-to-width-down/100?cb=20150331233357'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/d5/MK8D_Ancient_Tires.png/revision/latest/scale-to-width-down/100?cb=20200726154442'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f9/Leaf_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180810'\n ]\nglider_names = ['Super Glider', 'Cloud Glider', 'Wario Wing', 'Waddle Wing',\n 'Peach Parasol', 'Parachute', 'Parafoil', 'Flower Glider',\n 'Bowser Kite', 'Plane Glider', 'MKTV Parafoil', 'Gold Glider',\n 'Hylian Kite', 'Paraglider', 'Paper 
Glider']\nglider_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/a/a8/SuperGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125815'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/Cloud_Glider.png/revision/latest/scale-to-width-down/100?cb=20141102125838'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/a/ae/WarioWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125853'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/e/ef/WaddleWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125901'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/6e/PeachParasolGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125940'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/ParachuteGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125823'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/c4/ParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125830'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/b/b3/FlowerGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125846'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/f/f7/BowserKiteMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125909'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/c/ca/PlaneGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125930'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MKTVParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125947'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/GoldGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125956'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/MK8_HylianKite.png/revision/latest/scale-to-width-down/100?cb=20150331232731'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/3/39/MK8D_Paraglider.png/revision/latest/scale-to-width-down/117?cb=20200726155246'\n ,\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/PaperGliderIcon-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426181313'\n ]\nx = 0\ny = 0\nfor char in char_names:\n index = x - y + 1\n name = char_names[x]\n if 'Yoshi (' in name or 'Shyguy (' in name or '(G)' in name:\n y += 1\n index = None\n new_char = Character(name=char_names[x], image_url=char_urls[x], index=\n index)\n new_char.save()\n x += 1\nx = 0\nfor tire in tire_names:\n index = x + 1\n new_tire = Tire(name=tire_names[x], image_url=tire_urls[x], index=index)\n new_tire.save()\n x += 1\nx = 0\nfor car in car_names:\n index = x + 1\n new_car = Vehicle(name=car_names[x], image_url=car_urls[x], index=index)\n new_car.save()\n x += 1\nx = 0\nfor glider in glider_names:\n index = x + 1\n new_glider = Glider(name=glider_names[x], image_url=glider_urls[x],\n index=index)\n new_glider.save()\n x += 1\n",
"step-5": "import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE','mkrandom.settings')\n\nimport django\ndjango.setup()\nfrom main.models import Character, Vehicle, Tire, Glider\nchar_names = [\n 'Mario',\n 'Luigi',\n 'Peach',\n 'Daisy',\n 'Rosalina',\n 'Mario Tanooki',\n 'Peach cat',\n 'Yoshi',\n 'Yoshi (LBlue)',\n 'Yoshi (Black)',\n 'Yoshi (Rose)',\n 'Yoshi (Yellow)',\n 'Yoshi (White)',\n 'Yoshi (Blue)',\n 'Yoshi (Rose)',\n 'Yoshi (Orange)',\n 'Toad',\n 'Koopa',\n 'Shyguy',\n 'Shyguy (LB)',\n 'Shyguy (Black)',\n 'Shyguy (Rose)',\n 'Shyguy (Yellow)',\n 'Shyguy (White)',\n 'Shyguy (Blue)',\n 'Shyguy (Rose)',\n 'Shyguy (Orange)',\n 'Lakitu',\n 'Toadette',\n 'Boo',\n 'Baby Mario',\n 'Baby Luigi',\n 'Baby Peach',\n 'Baby Daisy',\n 'Baby Rosalina',\n 'Metal Mario',\n 'Golden Mario',\n 'Golden Peach',\n 'Wario',\n 'Waluigi',\n 'Donkey Kong',\n 'Bowser',\n 'Skelerex',\n 'Bowser Jr',\n 'Dry Bowser',\n 'Lemmy',\n 'Larry',\n 'Wendy',\n 'Ludwig',\n 'Iggy',\n 'Roy',\n 'Morton',\n 'Inkling (G)',\n 'Inkling (B)',\n 'Link (SSBU)',\n 'Link (BOTW)',\n 'Villager (B)',\n 'Villager(G)',\n 'Mary',\n]\n\nchar_urls = [\n 'https://static.wikia.nocookie.net/heros/images/9/94/Mario_and_Sonic_Tokyo_2020_Mario_artwork.png/revision/latest?cb=20210410003745&path-prefix=fr',\n 'https://freepngimg.com/thumb/categories/462.png',\n 'https://static.wikia.nocookie.net/smashbros/images/0/06/Peach_SMP.png/revision/latest?cb=20190420130956&path-prefix=fr',\n 'https://static.wikia.nocookie.net/mario/images/6/6c/Artwork_Daisy_MP10.png/revision/latest?cb=20171021130941&path-prefix=fr',\n 'https://static.wikia.nocookie.net/mario/images/1/17/Harmonie_The_Top_100.png/revision/latest?cb=20171021123917&path-prefix=fr',\n 'https://static.wikia.nocookie.net/mario/images/3/33/Mario_tanuki_-_SM3DL.png/revision/latest/scale-to-width-down/250?cb=20190409114830&path-prefix=fr',\n 'https://i.pinimg.com/originals/7d/5d/d8/7d5dd803a6eaad9e7491ed59f184eb39.png',\n 'https://www.seekpng.com/png/full/15-156558_ground-pound-yoshi-super-mario-yoshi-png.png',\n 'https://static.wikia.nocookie.net/hello-yoshi/images/f/fb/ACL_MK8_Light_Blue_Yoshi.png/revision/latest?cb=20180325192809',\n 'https://www.123-stickers.com/5731-6069-large/Array.jpg',\n 'https://static.wikia.nocookie.net/supermariorun/images/3/32/Yoshi_rouge.PNG/revision/latest?cb=20190427132857&path-prefix=fr',\n 'https://static.wikia.nocookie.net/supermariorun/images/9/94/Yoshi_jaune.PNG/revision/latest?cb=20190427132253&path-prefix=fr',\n 'https://static.wikia.nocookie.net/yoshi/images/b/b9/Yoshi_blanc.png/revision/latest?cb=20181128092526&path-prefix=fr',\n 'https://mario.wiki.gallery/images/thumb/9/9a/MKT_Artwork_BlueYoshi.png/129px-MKT_Artwork_BlueYoshi.png',\n 'https://e7.pngegg.com/pngimages/860/699/png-clipart-mario-yoshi-yoshi-s-story-super-mario-world-2-yoshi-s-island-yoshi-s-woolly-world-yoshi-s-new-island-yoshi-nintendo-computer-wallpaper.png',\n 'https://static.wikia.nocookie.net/yoshi/images/a/a4/Orange-yoshi-yoshi-29007923-415-479.png/revision/latest?cb=20201026191941&path-prefix=fr',\n 'https://static.wikia.nocookie.net/mario/images/e/e4/SMRToad.png/revision/latest?cb=20161123170829&path-prefix=fr',\n 'https://static.wikia.nocookie.net/smashbros/images/e/ed/Art_Koopa_NSMB.png/revision/latest?cb=20131223214127&path-prefix=fr',\n 
'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/d585815f-9fc0-440f-9949-a4a9c06bb713/db7whvu-94fc7f0d-1dea-47aa-922d-428a26ed8480.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcL2Q1ODU4MTVmLTlmYzAtNDQwZi05OTQ5LWE0YTljMDZiYjcxM1wvZGI3d2h2dS05NGZjN2YwZC0xZGVhLTQ3YWEtOTIyZC00MjhhMjZlZDg0ODAucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.iNMsbFuXa43xVer7q_c2UB65P2wAVONONt-wrMHozjo',\n 'https://i.pinimg.com/originals/58/69/c3/5869c3396ea69ca97c76f0b725099aa9.png',\n 'https://static.wikia.nocookie.net/supermarioexploration/images/8/8e/18B83E32-0819-4994-A3F8-E90CC35AB8AC.png/revision/latest/scale-to-width-down/872?cb=20180607214102',\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dw0-1d608b14-5aba-43f7-b4a8-e855207824c1.png/v1/fill/w_600,h_815,strp/super_mario__green_shy_guy_2d_by_joshuat1306_dcz4dw0-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHcwLTFkNjA4YjE0LTVhYmEtNDNmNy1iNGE4LWU4NTUyMDc4MjRjMS5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.RxuED4zTRqJT-3TAQ8iHGS6zpoDw4O4DIKFQ8cKWpSM',\n 'https://static.miraheze.org/drmarioworldwiki/thumb/9/9a/Cha_sub_shyguyYellow.png/144px-Cha_sub_shyguyYellow.png',\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz564x-7c505016-32d8-4268-b44e-358edcb1b10d.png/v1/fill/w_600,h_815,strp/super_mario__white_shy_guy_2d_by_joshuat1306_dcz564x-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o1NjR4LTdjNTA1MDE2LTMyZDgtNDI2OC1iNDRlLTM1OGVkY2IxYjEwZC5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.gLfujNRPJ5nNiOq-siQUD6ifo28x0oQHEB4PrpNHqFk',\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/ed991cf4-7c8c-4530-b6ba-a3abf3ab2eae/dcz4dqq-95483c93-ee74-4ca0-a820-3287359457a3.png/v1/fill/w_600,h_815,strp/super_mario__blue_shy_guy_2d_by_joshuat1306_dcz4dqq-fullview.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7ImhlaWdodCI6Ijw9ODE1IiwicGF0aCI6IlwvZlwvZWQ5OTFjZjQtN2M4Yy00NTMwLWI2YmEtYTNhYmYzYWIyZWFlXC9kY3o0ZHFxLTk1NDgzYzkzLWVlNzQtNGNhMC1hODIwLTMyODczNTk0NTdhMy5wbmciLCJ3aWR0aCI6Ijw9NjAwIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmltYWdlLm9wZXJhdGlvbnMiXX0.w1w6wZOiQ0oxfwNTiiuFy2Ph6yO6mN99-U_HYKZdZyQ',\n 'https://static.wikia.nocookie.net/paper-shin-aka-keroro-gunsou/images/f/f0/Pink_Shy_Guy_dance.png/revision/latest/scale-to-width-down/250?cb=20210525165708',\n 'https://static.wikia.nocookie.net/fantendo/images/f/ff/ShyGuyn_s._Png/revision/latest/scale-to-width-down/250?cb=20121222235649',\n 'https://static.wikia.nocookie.net/fantendo/images/e/eb/Cloudless_Lakitu.png/revision/latest/scale-to-width-down/250?cb=20120809192910',\n 
'https://static.wikia.nocookie.net/mario/images/b/b2/ToadetteMP10.png/revision/latest?cb=20190609122040&path-prefix=fr',\n 'https://static.wikia.nocookie.net/mario/images/a/a1/Boo_CTTT.png/revision/latest?cb=20210504081014',\n 'https://static.wikia.nocookie.net/videogames-fanon/images/d/d9/BabySit.png/revision/latest?cb=20120930205222',\n 'https://i.pinimg.com/originals/c8/4d/1f/c84d1f11741ee80b7bbda79a449917ab.png',\n 'https://www.pngkit.com/png/full/436-4365611_download-zip-archive-baby-peach-mario-bros.png',\n 'https://static.wikia.nocookie.net/fantendo/images/b/be/Baby_Daisy.png/revision/latest?cb=20210119015117',\n 'https://mario.wiki.gallery/images/3/33/MKT_Artwork_BabyRosalina.png',\n 'https://static.wikia.nocookie.net/mario/images/7/7e/Metal_Mario_Artwork_2_-_Mario_Kart_7.png/revision/latest?cb=20120513171323',\n 'https://static.wikia.nocookie.net/mario/images/1/10/MGWT_Gold_Mario.png/revision/latest?cb=20190317040405',\n 'https://images-wixmp-ed30a86b8c4ca887773594c2.wixmp.com/f/0e738c17-7f3c-422e-8225-f8c782b08626/deg7wos-27ff3182-82ba-43ab-b5c0-f05cbec329f2.png?token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJ1cm46YXBwOjdlMGQxODg5ODIyNjQzNzNhNWYwZDQxNWVhMGQyNmUwIiwiaXNzIjoidXJuOmFwcDo3ZTBkMTg4OTgyMjY0MzczYTVmMGQ0MTVlYTBkMjZlMCIsIm9iaiI6W1t7InBhdGgiOiJcL2ZcLzBlNzM4YzE3LTdmM2MtNDIyZS04MjI1LWY4Yzc4MmIwODYyNlwvZGVnN3dvcy0yN2ZmMzE4Mi04MmJhLTQzYWItYjVjMC1mMDVjYmVjMzI5ZjIucG5nIn1dXSwiYXVkIjpbInVybjpzZXJ2aWNlOmZpbGUuZG93bmxvYWQiXX0.bK3J5_NJrKn-JHsqIxEUCjBiXqM4dMnBho-b2lJ6sK8',\n 'https://www.smashbros.com/assets_v2/img/fighter/wario/main2.png',\n 'https://static.wikia.nocookie.net/wario/images/8/8a/Waluigi%28SMP%290.png/revision/latest?cb=20180929091141',\n 'https://static.wikia.nocookie.net/heroes-fr/images/5/5c/Donkey_Kong.png/revision/latest?cb=20201122110342&path-prefix=fr',\n 'https://static.wikia.nocookie.net/epicpixelbattles/images/0/0b/Bowser-png-clipart-removebg-preview.png/revision/latest?cb=20201013093525',\n 'https://static.wikia.nocookie.net/mario/images/1/12/MPSRSkelerex.png/revision/latest/scale-to-width-down/2000?cb=20161015183419&path-prefix=fr',\n 'https://static.wikia.nocookie.net/mario/images/0/07/Art_Bowser_Jr_SPM.png/revision/latest?cb=20181112222531&path-prefix=fr',\n 'https://mario.wiki.gallery/images/thumb/9/9d/Dry_Bowser_Artwork.png/250px-Dry_Bowser_Artwork.png',\n 'https://www.pngkey.com/png/full/563-5634904_super-mario-odyssey-lemmy-mario-kart-8-deluxe.png',\n 'https://static.wikia.nocookie.net/mariokart/images/4/42/LarryKoopa.png/revision/latest?cb=20140313170129',\n 'https://mario.wiki.gallery/images/thumb/9/95/NSMBW_Wendy_Artwork.png/1200px-NSMBW_Wendy_Artwork.png',\n 'https://static.wikia.nocookie.net/mario-fr/images/f/f6/1-1571859148.png/revision/latest?cb=20191023193229&path-prefix=fr',\n 'https://static.wikia.nocookie.net/mario/images/4/4c/Iggy_NSMBU.png/revision/latest?cb=20171208215237&path-prefix=fr',\n 'https://static.wikia.nocookie.net/mario-fr/images/f/fb/2.png/revision/latest?cb=20191023191713&path-prefix=fr',\n 'https://static.wikia.nocookie.net/fantendo/images/4/4f/Morton_Koopa_Jr_3D.png/revision/latest?cb=20110403192112',\n 'https://static.wikia.nocookie.net/mario/images/2/2e/Inkling_SSBU.png/revision/latest?cb=20200216081405',\n 'https://i.pinimg.com/originals/7c/ce/f8/7ccef872fcee2e11945c6799ce2985cc.png',\n 'https://www.seekpng.com/png/full/7-73001_link-zelda-png-super-smash-bros-for-wii.png',\n 'https://static.wikia.nocookie.net/versus-compendium/images/0/00/Link_BotW.png/revision/latest?cb=20181128185543',\n 
'https://static.wikia.nocookie.net/nintendo/images/1/1d/Villager-Boy-1.png/revision/latest?cb=20150419125930&path-prefix=en',\n 'https://i.pinimg.com/originals/bb/ca/f7/bbcaf749d9dc2d1b1259e8fe5cb49769.png',\n 'https://static.wikia.nocookie.net/nintendo-univers/images/a/a9/Marie_ACAF_3.png/revision/latest?cb=20161221163100&path-prefix=fr',\n]\n\ncar_names = [\n 'Standard Kart',\n 'Pipe Frame',\n 'Mach 8',\n 'Steel Driver',\n 'Cat Cruiser',\n 'Circuit Special',\n 'Tri-Speeder',\n 'Badwagon',\n 'Prancer',\n 'Biddybuggy',\n 'Landship',\n 'Sneeker',\n 'Sports Coupe',\n 'Gold Standard',\n 'GLA',\n 'W 25 Silver Arrow',\n '300 SL Roadster',\n 'Blue Falcon',\n 'Tanooki Kart',\n 'B Dasher',\n 'Streetle',\n 'P-Wing',\n 'Koopa Clown',\n 'Standard Bike',\n 'Comet',\n 'Sport Bike',\n 'The Duke',\n 'Flame Rider',\n 'Varmint',\n 'Mr. Scooty',\n 'Jet Bike',\n 'Yoshi Bike',\n 'Master Cycle',\n 'Master Cycle Zero',\n 'City Tripper',\n 'Standard ATV',\n 'Wild Wiggler',\n 'Teddy Buggy',\n 'Bone Rattler',\n 'Splat Buggy',\n 'Inkstriker',\n]\n\ncar_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/0/05/StandardKartBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20140715154926',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d1/PipeFrameBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122932',\n 'https://static.wikia.nocookie.net/mariokart/images/d/df/Mach8BodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102122956',\n 'https://static.wikia.nocookie.net/mariokart/images/9/94/Steel_Driver.png/revision/latest/scale-to-width-down/100?cb=20200925190921',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f4/CatCruiserBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123132',\n 'https://static.wikia.nocookie.net/mariokart/images/6/6c/CircuitSpecialBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123237',\n 'https://static.wikia.nocookie.net/mariokart/images/5/56/TrispeederBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123217',\n 'https://static.wikia.nocookie.net/mariokart/images/c/c2/BadwagonBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123350',\n 'https://static.wikia.nocookie.net/mariokart/images/f/ff/PrancerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123333',\n 'https://static.wikia.nocookie.net/mariokart/images/4/45/BiddybuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123322',\n 'https://static.wikia.nocookie.net/mariokart/images/6/6d/LandshipBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123656',\n 'https://static.wikia.nocookie.net/mariokart/images/4/47/SneakerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123617',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f8/SportsCoupeMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123625',\n 'https://static.wikia.nocookie.net/mariokart/images/3/31/MK8Gold_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102123637',\n 'https://static.wikia.nocookie.net/mariokart/images/c/c2/GLA-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140333',\n 'https://static.wikia.nocookie.net/mariokart/images/2/25/W25SilverArrow-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332',\n 'https://static.wikia.nocookie.net/mariokart/images/1/17/300SLRoadster-MK8.png/revision/latest/scale-to-width-down/100?cb=20160102140332',\n 
'https://static.wikia.nocookie.net/mariokart/images/e/ed/MK8_BlueFalcon.png/revision/latest/scale-to-width-down/100?cb=20150331235059',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d7/MK8_TanookiBuggy.png/revision/latest/scale-to-width-down/100?cb=20150331235545',\n 'https://static.wikia.nocookie.net/mariokart/images/3/32/MK8_BDasher.png/revision/latest/scale-to-width-down/100?cb=20150401000836',\n 'https://static.wikia.nocookie.net/mariokart/images/c/cf/MK8Streetle.png/revision/latest/scale-to-width-down/100?cb=20150426174005',\n 'https://static.wikia.nocookie.net/mariokart/images/c/cd/MK8PWing.png/revision/latest/scale-to-width-down/100?cb=20150426174107',\n 'https://static.wikia.nocookie.net/mariokart/images/7/70/MK8DX_Koopa_Clown.png/revision/latest/scale-to-width-down/100?cb=20170704061052',\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/StandardBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123849',\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/CometBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124024',\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/SportBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123857',\n 'https://static.wikia.nocookie.net/mariokart/images/8/8a/TheDukeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925174819',\n 'https://static.wikia.nocookie.net/mariokart/images/3/31/FlameRiderBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123942',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d0/VarmintBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123951',\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/MrScootyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123925',\n 'https://static.wikia.nocookie.net/mariokart/images/1/12/JetBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102123928',\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/YoshiBikeBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925193256',\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/MK8_MasterCycle.png/revision/latest/scale-to-width-down/100?cb=20150331231734',\n 'https://static.wikia.nocookie.net/mariokart/images/3/3e/150px-MK8D_Master_Cycle_Zero.png/revision/latest/scale-to-width-down/111?cb=20200726154936',\n 'https://static.wikia.nocookie.net/mariokart/images/9/90/MK8CityTripper.png/revision/latest/scale-to-width-down/100?cb=20150426175601',\n 'https://static.wikia.nocookie.net/mariokart/images/2/23/StandardATVBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124111',\n 'https://static.wikia.nocookie.net/mariokart/images/a/aa/WildWigglerBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20200925175122',\n 'https://static.wikia.nocookie.net/mariokart/images/f/fa/TeddyBuggyBodyMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124120',\n 'https://static.wikia.nocookie.net/mariokart/images/0/0a/MK8BoneRattler.png/revision/latest/scale-to-width-down/100?cb=20150426180108',\n 'https://static.wikia.nocookie.net/mariokart/images/6/63/MK8DX_Splat_Buggy.png/revision/latest/scale-to-width-down/100?cb=20170706064814',\n 'https://static.wikia.nocookie.net/mariokart/images/e/eb/MK8DX_Inkstriker.png/revision/latest/scale-to-width-down/100?cb=20170706065507',\n]\n\ntire_names = [\n 'Standard',\n 'Monster',\n 'Roller',\n 'Slim',\n 'Slick',\n 'Metal',\n 'Button',\n 'Off-Road',\n 'Sponge',\n 'Wood',\n 'Cushion',\n 'Blue Standard',\n 'Hot Monster',\n 'Azure Roller',\n 
'Crimson Slim',\n 'Cyber Slick',\n 'Retro Off-Road',\n 'Gold Tires',\n 'GLA Tires',\n 'Triforce Tires',\n 'Ancient Tyres',\n 'Leaf Tires',\n]\n\ntire_urls = [\n 'https://static.wikia.nocookie.net/mariokart/images/a/a8/StandardTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125545',\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/MonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125541',\n 'https://static.wikia.nocookie.net/mariokart/images/7/76/RollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125539',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f8/SlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125536',\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/SlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125542',\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MetalTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124533',\n 'https://static.wikia.nocookie.net/mariokart/images/0/07/ButtonTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124541',\n 'https://static.wikia.nocookie.net/mariokart/images/2/25/Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102124559',\n 'https://static.wikia.nocookie.net/mariokart/images/4/4c/SpongeTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124549',\n 'https://static.wikia.nocookie.net/mariokart/images/0/03/WoodTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124724',\n 'https://static.wikia.nocookie.net/mariokart/images/9/92/CushionTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124817',\n 'https://static.wikia.nocookie.net/mariokart/images/d/db/Blue_Standard.png/revision/latest/scale-to-width-down/100?cb=20141102124836',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d1/HotMonsterTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102124834',\n 'https://static.wikia.nocookie.net/mariokart/images/f/fe/AzureRollerTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20200726154338',\n 'https://static.wikia.nocookie.net/mariokart/images/7/71/CrimsonSlimTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125627',\n 'https://static.wikia.nocookie.net/mariokart/images/2/29/CyberSlickTiresMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125626',\n 'https://static.wikia.nocookie.net/mariokart/images/4/48/Retro_Off-Road.png/revision/latest/scale-to-width-down/100?cb=20141102125629',\n 'https://static.wikia.nocookie.net/mariokart/images/5/52/Gold_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20141102125630',\n 'https://static.wikia.nocookie.net/mariokart/images/b/ba/GLATires-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180539',\n 'https://static.wikia.nocookie.net/mariokart/images/0/09/MK8_TriforceTires.png/revision/latest/scale-to-width-down/100?cb=20150331233357',\n 'https://static.wikia.nocookie.net/mariokart/images/d/d5/MK8D_Ancient_Tires.png/revision/latest/scale-to-width-down/100?cb=20200726154442',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f9/Leaf_Tires_MK8.png/revision/latest/scale-to-width-down/100?cb=20150426180810',\n]\n\nglider_names = [\n 'Super Glider',\n 'Cloud Glider',\n 'Wario Wing',\n 'Waddle Wing',\n 'Peach Parasol',\n 'Parachute',\n 'Parafoil',\n 'Flower Glider',\n 'Bowser Kite',\n 'Plane Glider',\n 'MKTV Parafoil',\n 'Gold Glider',\n 'Hylian Kite',\n 'Paraglider',\n 'Paper Glider',\n]\n\nglider_urls = [\n 
'https://static.wikia.nocookie.net/mariokart/images/a/a8/SuperGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125815',\n 'https://static.wikia.nocookie.net/mariokart/images/8/84/Cloud_Glider.png/revision/latest/scale-to-width-down/100?cb=20141102125838',\n 'https://static.wikia.nocookie.net/mariokart/images/a/ae/WarioWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125853',\n 'https://static.wikia.nocookie.net/mariokart/images/e/ef/WaddleWingMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125901',\n 'https://static.wikia.nocookie.net/mariokart/images/6/6e/PeachParasolGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125940',\n 'https://static.wikia.nocookie.net/mariokart/images/d/dd/ParachuteGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125823',\n 'https://static.wikia.nocookie.net/mariokart/images/c/c4/ParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125830',\n 'https://static.wikia.nocookie.net/mariokart/images/b/b3/FlowerGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125846',\n 'https://static.wikia.nocookie.net/mariokart/images/f/f7/BowserKiteMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125909',\n 'https://static.wikia.nocookie.net/mariokart/images/c/ca/PlaneGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125930',\n 'https://static.wikia.nocookie.net/mariokart/images/9/96/MKTVParafoilGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125947',\n 'https://static.wikia.nocookie.net/mariokart/images/1/18/GoldGliderMK8.png/revision/latest/scale-to-width-down/100?cb=20141102125956',\n 'https://static.wikia.nocookie.net/mariokart/images/6/62/MK8_HylianKite.png/revision/latest/scale-to-width-down/100?cb=20150331232731',\n 'https://static.wikia.nocookie.net/mariokart/images/3/39/MK8D_Paraglider.png/revision/latest/scale-to-width-down/117?cb=20200726155246',\n 'https://static.wikia.nocookie.net/mariokart/images/0/0e/PaperGliderIcon-MK8.png/revision/latest/scale-to-width-down/100?cb=20150426181313',\n]\n\n\nx=0\ny=0\nfor char in char_names:\n index=x-y+1\n name = char_names[x]\n if \"Yoshi (\" in name or \"Shyguy (\" in name or \"(G)\" in name:\n y+=1\n index=None\n new_char = Character(name=char_names[x],image_url=char_urls[x],index=index)\n new_char.save()\n x+=1\n\nx=0\nfor tire in tire_names:\n index=x+1\n new_tire = Tire(name=tire_names[x],image_url=tire_urls[x],index=index)\n new_tire.save()\n x+=1\nx=0\nfor car in car_names:\n index=x+1\n new_car = Vehicle(name=car_names[x],image_url=car_urls[x],index=index)\n new_car.save()\n x+=1\nx=0\nfor glider in glider_names:\n index=x+1\n new_glider = Glider(name=glider_names[x],image_url=glider_urls[x],index=index)\n new_glider.save()\n x+=1\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
REFERENCE a table with a FOREIGN KEY
In your database, you want the professors table to reference the universities table. You can do that by specifying a column in the professors table that references a column in the universities table.
As just shown in the video, the syntax for that looks like this:
ALTER TABLE a
ADD CONSTRAINT a_fkey FOREIGN KEY (b_id) REFERENCES b (id);
Table a should now refer to table b, via b_id, which points to id. a_fkey is, as usual, a constraint name you can choose on your own.
Pay attention to the naming convention employed here: Usually, a foreign key column referencing another table's primary key named id is called x_id, where x is the singular form of the referenced table's name (here, university_id points to the universities table).
Instructions
100 XP
1 Rename the university_shortname column to university_id in professors.
2 Add a foreign key on university_id column in professors that references the id column in universities.
Name this foreign key professors_fkey.
'''
-- Rename the university_shortname column
ALTER TABLE professors
RENAME COLUMN university_shortname TO university_id;
-- Add a foreign key on professors referencing universities
ALTER TABLE professors
ADD CONSTRAINT professors_fkey FOREIGN KEY (university_id) REFERENCES universities (id);
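
-- Illustrative follow-up (editor's sketch, not part of the exercise solution):
-- with the foreign key in place, professors rows can be joined to their
-- universities through university_id. Column names other than id and
-- university_id used below (lastname, university_city) are assumptions about
-- this schema.
SELECT p.lastname, u.university_city
FROM professors AS p
INNER JOIN universities AS u
ON p.university_id = u.id;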
|
normal
|
{
"blob_id": "deaa458e51a7a53dd954d772f9e3b1734508cf28",
"index": 6770,
"step-1": "'''\nREFERENCE a table with a FOREIGN KEY\n\nIn your database, you want the professors table to reference the universities table. You can do that by specifying a column in professors table that references a column in the universities table.\n\nAs just shown in the video, the syntax for that looks like this:\n\nALTER TABLE a\nADD CONSTRAINT a_fkey FOREIGN KEY (b_id) REFERENCES b (id);\n\nTable a should now refer to table b, via b_id, which points to id. a_fkey is, as usual, a constraint name you can choose on your own.\n\nPay attention to the naming convention employed here: Usually, a foreign key referencing another primary key with name id is named x_id, where x is the name of the referencing table in the singular form.\n\nInstructions\n100 XP\n\n 1 Rename the university_shortname column to university_id in professors.\n\n 2 Add a foreign key on university_id column in professors that references the id column in universities.\n Name this foreign key professors_fkey.\n\n'''\n\n\n-- Rename the university_shortname column\nALTER TABLE professors\nRENAME COLUMN university_shortname TO university_id;\n\n\n-- Add a foreign key on professors referencing universities\nALTER TABLE professors\nADD CONSTRAINT professors_fkey FOREIGN KEY (university_id) REFERENCES universities (id);",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import tensorflow as tf
import numpy as np
import math
import sys
import os
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tf_util
# from transform_nets import input_transform_net, feature_transform_net
import tf_util_loss
class Network:
def placeholder_inputs(self,batch_size, num_point):
# with tf.variable_scope('inputs') as ip:
source_pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
return source_pointclouds_pl
def get_model(self, source_pointclouds_pl, feature_size, is_training, bn_decay=None):
""" Classification PointNet, input is BxNx3, output Bx40 """
# with tf.variable_scope('PointNet') as pn:
        # Comment out the two lines above to use the same points for the loss and the features, and change the variable names in the next line accordingly.
batch_size = source_pointclouds_pl.get_shape()[0].value
num_point = source_pointclouds_pl.get_shape()[1].value
end_points = {}
input_image = tf.expand_dims(source_pointclouds_pl, -1)
net = tf_util.conv2d(input_image, 128, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay, activation_fn=None)
# Symmetric function: max pooling
source_feature = tf_util.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool')
source_feature = tf.tile(source_feature, [1, num_point, 1, 1])
source_feature = tf.concat([net, source_feature], axis=3)
net = tf_util.conv2d(source_feature, 512, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay, activation_fn=None)
source_global_feature = tf_util.max_pool2d(net, [num_point, 1],
padding='VALID', scope='maxpool')
source_global_feature = tf.reshape(source_global_feature, [batch_size, -1])
return source_global_feature
def decode_data(self, source_global_feature, is_training, bn_decay=None):
batch_size = source_global_feature.get_shape()[0].value
net = tf_util.fully_connected(source_global_feature, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 1024*3, activation_fn=None, scope='fc3')
predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])
return predicted_pointclouds_pl
def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):
with tf.variable_scope('loss') as LossEvaluation:
# loss = tf.reduce_mean(tf.square(tf.subtract(predicted_pointclouds_pl, source_pointclouds_pl)))
loss = tf_util_loss.chamfer(predicted_pointclouds_pl, source_pointclouds_pl)
return loss
if __name__=='__main__':
with tf.Graph().as_default():
net = Network()
inputs = tf.zeros((32,1024,3))
outputs = net.get_model(inputs, 1024, tf.constant(True))
print(outputs)
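
# Illustrative sketch (editor's assumption, not part of the original script):
# wiring the full autoencoder graph -- encoder, decoder and Chamfer loss --
# using only the methods defined above, with the same shapes the __main__
# block assumes (a batch of 32 clouds, 1024 points each). No session is run;
# this only checks that the graph builds end to end.
def build_autoencoder_graph(batch_size=32, num_point=1024):
    with tf.Graph().as_default():
        net = Network()
        source = net.placeholder_inputs(batch_size, num_point)
        # Encode each cloud into a single global feature vector.
        global_feature = net.get_model(source, 1024, tf.constant(True))
        # Decode the feature back into a fixed-size point cloud.
        predicted = net.decode_data(global_feature, tf.constant(True))
        # Chamfer distance between the reconstruction and the input.
        loss = net.get_loss_b(predicted, source)
        return loss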
|
normal
|
{
"blob_id": "e4a0f26afe8c78e4abbd85834c96ed5ba84e1f0b",
"index": 3894,
"step-1": "<mask token>\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\n<mask token>\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n\n def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n with tf.variable_scope('loss') as LossEvaluation:\n loss = tf_util_loss.chamfer(predicted_pointclouds_pl,\n source_pointclouds_pl)\n return loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n net = Network()\n inputs = tf.zeros((32, 1024, 3))\n outputs = net.get_model(inputs, 1024, tf.constant(True))\n print(outputs)\n",
"step-3": "<mask token>\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\n<mask token>\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n\n def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n with tf.variable_scope('loss') as LossEvaluation:\n loss = tf_util_loss.chamfer(predicted_pointclouds_pl,\n source_pointclouds_pl)\n return loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n net = Network()\n inputs = tf.zeros((32, 1024, 3))\n outputs = net.get_model(inputs, 1024, tf.constant(True))\n print(outputs)\n",
"step-4": "import tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\nimport numpy as np\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nimport tf_util\nimport tf_util_loss\n\n\nclass Network:\n\n def placeholder_inputs(self, batch_size, num_point):\n source_pointclouds_pl = tf.placeholder(tf.float32, shape=(\n batch_size, num_point, 3))\n return source_pointclouds_pl\n\n def get_model(self, source_pointclouds_pl, feature_size, is_training,\n bn_decay=None):\n \"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n batch_size = source_pointclouds_pl.get_shape()[0].value\n num_point = source_pointclouds_pl.get_shape()[1].value\n end_points = {}\n input_image = tf.expand_dims(source_pointclouds_pl, -1)\n net = tf_util.conv2d(input_image, 128, [1, 3], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv1',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 256, [1, 1], padding='VALID', stride=[1, \n 1], bn=True, is_training=is_training, scope='conv2', bn_decay=\n bn_decay, activation_fn=None)\n source_feature = tf_util.max_pool2d(net, [num_point, 1], padding=\n 'VALID', scope='maxpool')\n source_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n source_feature = tf.concat([net, source_feature], axis=3)\n net = tf_util.conv2d(source_feature, 512, [1, 1], padding='VALID',\n stride=[1, 1], bn=True, is_training=is_training, scope='conv3',\n bn_decay=bn_decay)\n net = tf_util.conv2d(net, 1024, [1, 1], padding='VALID', stride=[1,\n 1], bn=True, is_training=is_training, scope='conv4', bn_decay=\n bn_decay, activation_fn=None)\n source_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n padding='VALID', scope='maxpool')\n source_global_feature = tf.reshape(source_global_feature, [\n batch_size, -1])\n return source_global_feature\n\n def decode_data(self, source_global_feature, is_training, bn_decay=None):\n batch_size = source_global_feature.get_shape()[0].value\n net = tf_util.fully_connected(source_global_feature, 1024, bn=True,\n is_training=is_training, scope='fc1', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=\n is_training, scope='fc2', bn_decay=bn_decay)\n net = tf_util.fully_connected(net, 1024 * 3, activation_fn=None,\n scope='fc3')\n predicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n return predicted_pointclouds_pl\n\n def get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n with tf.variable_scope('loss') as LossEvaluation:\n loss = tf_util_loss.chamfer(predicted_pointclouds_pl,\n source_pointclouds_pl)\n return loss\n\n\nif __name__ == '__main__':\n with tf.Graph().as_default():\n net = Network()\n inputs = tf.zeros((32, 1024, 3))\n outputs = net.get_model(inputs, 1024, tf.constant(True))\n print(outputs)\n",
"step-5": "import tensorflow as tf\nimport numpy as np\nimport math\nimport sys\nimport os\nimport numpy as np\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../utils'))\nimport tf_util\n# from transform_nets import input_transform_net, feature_transform_net\nimport tf_util_loss\n\nclass Network:\n\tdef placeholder_inputs(self,batch_size, num_point):\n\t\t# with tf.variable_scope('inputs') as ip:\n\t\tsource_pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))\n\t\treturn source_pointclouds_pl\n\n\tdef get_model(self, source_pointclouds_pl, feature_size, is_training, bn_decay=None):\n\t\t\"\"\" Classification PointNet, input is BxNx3, output Bx40 \"\"\"\n\t\t# with tf.variable_scope('PointNet') as pn:\n\n\t\t# Comment above two lines to have same points for loss and features and also change the variable names in the next line.\n\t\tbatch_size = source_pointclouds_pl.get_shape()[0].value\n\t\tnum_point = source_pointclouds_pl.get_shape()[1].value\n\t\tend_points = {}\n\n\t\tinput_image = tf.expand_dims(source_pointclouds_pl, -1)\n\n\t\tnet = tf_util.conv2d(input_image, 128, [1,3],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=True, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv1', bn_decay=bn_decay)\n\n\t\tnet = tf_util.conv2d(net, 256, [1,1],\n\t\t\t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=True, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv2', bn_decay=bn_decay, activation_fn=None)\n\n\t\t# Symmetric function: max pooling\n\t\tsource_feature = tf_util.max_pool2d(net, [num_point, 1],\n\t\t\t\t\t\t\t\t padding='VALID', scope='maxpool')\n\t\tsource_feature = tf.tile(source_feature, [1, num_point, 1, 1])\n\t\tsource_feature = tf.concat([net, source_feature], axis=3)\n\t\t\n\t\tnet = tf_util.conv2d(source_feature, 512, [1,1],\n\t\t \t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t\t\t\t\t\t bn=True, is_training=is_training,\n\t\t\t\t\t\t\t scope='conv3', bn_decay=bn_decay)\n\n\t\tnet = tf_util.conv2d(net, 1024, [1,1],\n\t\t \t\t\t\t\t padding='VALID', stride=[1,1],\n\t\t \t\t\t\t\t bn=True, is_training=is_training,\n\t\t \t\t\t\t\t scope='conv4', bn_decay=bn_decay, activation_fn=None)\n\t\tsource_global_feature = tf_util.max_pool2d(net, [num_point, 1],\n\t\t \t\t\t\t\t\t padding='VALID', scope='maxpool')\n\t\tsource_global_feature = tf.reshape(source_global_feature, [batch_size, -1])\n\n\t\treturn source_global_feature\n\n\tdef decode_data(self, source_global_feature, is_training, bn_decay=None):\n\t\tbatch_size = source_global_feature.get_shape()[0].value\n\t\tnet = tf_util.fully_connected(source_global_feature, 1024, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)\n\t\tnet = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)\t\t\n\t\tnet = tf_util.fully_connected(net, 1024*3, activation_fn=None, scope='fc3')\n\t\tpredicted_pointclouds_pl = tf.reshape(net, [batch_size, 1024, 3])\n\t\treturn predicted_pointclouds_pl\n\n\tdef get_loss_b(self, predicted_pointclouds_pl, source_pointclouds_pl):\n\t\twith tf.variable_scope('loss') as LossEvaluation:\n\t\t\t# loss = tf.reduce_mean(tf.square(tf.subtract(predicted_pointclouds_pl, source_pointclouds_pl)))\n\t\t\tloss = tf_util_loss.chamfer(predicted_pointclouds_pl, source_pointclouds_pl)\n\t\treturn loss\n\nif __name__=='__main__':\n\twith tf.Graph().as_default():\n\t\tnet = Network()\n\t\tinputs = tf.zeros((32,1024,3))\n\t\toutputs = 
net.get_model(inputs, 1024, tf.constant(True))\n\t\tprint(outputs)",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import random
from PyQt4.QtGui import (
QWidget, QHBoxLayout, QPushButton, QMainWindow, QIcon, QAction, QShortcut,
QKeySequence, QFileDialog, QMessageBox)
from PyQt4 import QtCore
class Controls(QWidget):
def __init__(self, parent):
super(Controls, self).__init__(parent)
self.layout = QHBoxLayout(self)
self.openButton = QPushButton('Open', self)
self.layout.addWidget(self.openButton)
self.playPauseButton = QPushButton('Play', self) # TODO implement pausing
self.layout.addWidget(self.playPauseButton)
self.nextButton = QPushButton('Next', self)
self.layout.addWidget(self.nextButton)
self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)
self.__nextShortcut.activated.connect(self.nextButton.click)
self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self)
self.__playPauseShortcut.activated.connect(self.playPauseButton.click)
class MainWindow(QMainWindow):
playSong = QtCore.pyqtSignal(str) # arg is path to file
def __init__(self, music_dir):
super(MainWindow, self).__init__()
self.__music_dir = music_dir
self.resize(400, 70)
self.move(0, 0)
self.setWindowTitle('Drink')
self.setWindowIcon(QIcon('icon.png'))
self.controls = Controls(self)
self.setCentralWidget(self.controls)
self.controls.openButton.clicked.connect(self.open)
self.show()
def open(self):
try:
fileName = QFileDialog.getOpenFileName(
self, "Open", self.__music_dir, "Mp3 Files (*.mp3)")
self.playSong.emit(fileName)
except Exception as error:
QMessageBox.critical(self, "Open error", error.message)
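
# Illustrative sketch (editor's assumption, not part of the original module):
# a minimal entry point that hosts MainWindow. The music directory passed in
# and the slot connected to playSong are placeholders; a real application
# would route the emitted file path into an audio backend.
if __name__ == '__main__':
    import sys
    from PyQt4.QtGui import QApplication

    app = QApplication(sys.argv)
    window = MainWindow('/home/user/Music')       # hypothetical music directory
    window.playSong.connect(lambda path: None)    # stand-in for a real player slot
    sys.exit(app.exec_())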
|
normal
|
{
"blob_id": "4e86dd74374297c3b0ce8fea93910003dac7d5d7",
"index": 8742,
"step-1": "<mask token>\n\n\nclass MainWindow(QMainWindow):\n playSong = QtCore.pyqtSignal(str)\n\n def __init__(self, music_dir):\n super(MainWindow, self).__init__()\n self.__music_dir = music_dir\n self.resize(400, 70)\n self.move(0, 0)\n self.setWindowTitle('Drink')\n self.setWindowIcon(QIcon('icon.png'))\n self.controls = Controls(self)\n self.setCentralWidget(self.controls)\n self.controls.openButton.clicked.connect(self.open)\n self.show()\n\n def open(self):\n try:\n fileName = QFileDialog.getOpenFileName(self, 'Open', self.\n __music_dir, 'Mp3 Files (*.mp3)')\n self.playSong.emit(fileName)\n except Exception as error:\n QMessageBox.critical(self, 'Open error', error.message)\n",
"step-2": "<mask token>\n\n\nclass Controls(QWidget):\n <mask token>\n\n\nclass MainWindow(QMainWindow):\n playSong = QtCore.pyqtSignal(str)\n\n def __init__(self, music_dir):\n super(MainWindow, self).__init__()\n self.__music_dir = music_dir\n self.resize(400, 70)\n self.move(0, 0)\n self.setWindowTitle('Drink')\n self.setWindowIcon(QIcon('icon.png'))\n self.controls = Controls(self)\n self.setCentralWidget(self.controls)\n self.controls.openButton.clicked.connect(self.open)\n self.show()\n\n def open(self):\n try:\n fileName = QFileDialog.getOpenFileName(self, 'Open', self.\n __music_dir, 'Mp3 Files (*.mp3)')\n self.playSong.emit(fileName)\n except Exception as error:\n QMessageBox.critical(self, 'Open error', error.message)\n",
"step-3": "<mask token>\n\n\nclass Controls(QWidget):\n\n def __init__(self, parent):\n super(Controls, self).__init__(parent)\n self.layout = QHBoxLayout(self)\n self.openButton = QPushButton('Open', self)\n self.layout.addWidget(self.openButton)\n self.playPauseButton = QPushButton('Play', self)\n self.layout.addWidget(self.playPauseButton)\n self.nextButton = QPushButton('Next', self)\n self.layout.addWidget(self.nextButton)\n self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)\n self.__nextShortcut.activated.connect(self.nextButton.click)\n self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self\n )\n self.__playPauseShortcut.activated.connect(self.playPauseButton.click)\n\n\nclass MainWindow(QMainWindow):\n playSong = QtCore.pyqtSignal(str)\n\n def __init__(self, music_dir):\n super(MainWindow, self).__init__()\n self.__music_dir = music_dir\n self.resize(400, 70)\n self.move(0, 0)\n self.setWindowTitle('Drink')\n self.setWindowIcon(QIcon('icon.png'))\n self.controls = Controls(self)\n self.setCentralWidget(self.controls)\n self.controls.openButton.clicked.connect(self.open)\n self.show()\n\n def open(self):\n try:\n fileName = QFileDialog.getOpenFileName(self, 'Open', self.\n __music_dir, 'Mp3 Files (*.mp3)')\n self.playSong.emit(fileName)\n except Exception as error:\n QMessageBox.critical(self, 'Open error', error.message)\n",
"step-4": "import random\nfrom PyQt4.QtGui import QWidget, QHBoxLayout, QPushButton, QMainWindow, QIcon, QAction, QShortcut, QKeySequence, QFileDialog, QMessageBox\nfrom PyQt4 import QtCore\n\n\nclass Controls(QWidget):\n\n def __init__(self, parent):\n super(Controls, self).__init__(parent)\n self.layout = QHBoxLayout(self)\n self.openButton = QPushButton('Open', self)\n self.layout.addWidget(self.openButton)\n self.playPauseButton = QPushButton('Play', self)\n self.layout.addWidget(self.playPauseButton)\n self.nextButton = QPushButton('Next', self)\n self.layout.addWidget(self.nextButton)\n self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)\n self.__nextShortcut.activated.connect(self.nextButton.click)\n self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self\n )\n self.__playPauseShortcut.activated.connect(self.playPauseButton.click)\n\n\nclass MainWindow(QMainWindow):\n playSong = QtCore.pyqtSignal(str)\n\n def __init__(self, music_dir):\n super(MainWindow, self).__init__()\n self.__music_dir = music_dir\n self.resize(400, 70)\n self.move(0, 0)\n self.setWindowTitle('Drink')\n self.setWindowIcon(QIcon('icon.png'))\n self.controls = Controls(self)\n self.setCentralWidget(self.controls)\n self.controls.openButton.clicked.connect(self.open)\n self.show()\n\n def open(self):\n try:\n fileName = QFileDialog.getOpenFileName(self, 'Open', self.\n __music_dir, 'Mp3 Files (*.mp3)')\n self.playSong.emit(fileName)\n except Exception as error:\n QMessageBox.critical(self, 'Open error', error.message)\n",
"step-5": "import random\r\n\r\nfrom PyQt4.QtGui import (\r\n QWidget, QHBoxLayout, QPushButton, QMainWindow, QIcon, QAction, QShortcut,\r\n QKeySequence, QFileDialog, QMessageBox)\r\nfrom PyQt4 import QtCore\r\n\r\nclass Controls(QWidget):\r\n def __init__(self, parent): \r\n super(Controls, self).__init__(parent)\r\n self.layout = QHBoxLayout(self)\r\n\r\n self.openButton = QPushButton('Open', self)\r\n self.layout.addWidget(self.openButton)\r\n\r\n self.playPauseButton = QPushButton('Play', self) # TODO implement pausing\r\n self.layout.addWidget(self.playPauseButton)\r\n\r\n self.nextButton = QPushButton('Next', self)\r\n self.layout.addWidget(self.nextButton)\r\n \r\n self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)\r\n self.__nextShortcut.activated.connect(self.nextButton.click)\r\n\r\n self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self)\r\n self.__playPauseShortcut.activated.connect(self.playPauseButton.click)\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n playSong = QtCore.pyqtSignal(str) # arg is path to file\r\n\r\n def __init__(self, music_dir):\r\n super(MainWindow, self).__init__()\r\n\r\n self.__music_dir = music_dir\r\n\r\n self.resize(400, 70)\r\n self.move(0, 0)\r\n self.setWindowTitle('Drink')\r\n self.setWindowIcon(QIcon('icon.png'))\r\n \r\n self.controls = Controls(self)\r\n self.setCentralWidget(self.controls)\r\n\r\n self.controls.openButton.clicked.connect(self.open)\r\n\r\n self.show()\r\n\r\n def open(self):\r\n try:\r\n fileName = QFileDialog.getOpenFileName(\r\n self, \"Open\", self.__music_dir, \"Mp3 Files (*.mp3)\")\r\n self.playSong.emit(fileName)\r\n except Exception as error:\r\n QMessageBox.critical(self, \"Open error\", error.message)\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from typing import Any
from electionguard.ballot import CiphertextAcceptedBallot
from electionguard.decryption import compute_decryption_share_for_ballot
from electionguard.election import CiphertextElectionContext
from electionguard.scheduler import Scheduler
from electionguard.serializable import write_json_object
from fastapi import APIRouter, Body, Depends
from app.core.scheduler import get_scheduler
from ..models import (
convert_guardian,
DecryptBallotSharesRequest,
DecryptBallotSharesResponse,
)
from ..tags import TALLY
router = APIRouter()
@router.post("/decrypt-shares", tags=[TALLY])
def decrypt_ballot_shares(
request: DecryptBallotSharesRequest = Body(...),
scheduler: Scheduler = Depends(get_scheduler),
) -> Any:
"""
Decrypt this guardian's share of one or more ballots
"""
ballots = [
CiphertextAcceptedBallot.from_json_object(ballot)
for ballot in request.encrypted_ballots
]
context = CiphertextElectionContext.from_json_object(request.context)
guardian = convert_guardian(request.guardian)
shares = [
compute_decryption_share_for_ballot(guardian, ballot, context, scheduler)
for ballot in ballots
]
response = DecryptBallotSharesResponse(
shares=[write_json_object(share) for share in shares]
)
return response
|
normal
|
{
"blob_id": "0544c67cb14549e32b6ff8ea3215c6c65c8416ec",
"index": 5542,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/decrypt-shares', tags=[TALLY])\ndef decrypt_ballot_shares(request: DecryptBallotSharesRequest=Body(...),\n scheduler: Scheduler=Depends(get_scheduler)) ->Any:\n \"\"\"\n Decrypt this guardian's share of one or more ballots\n \"\"\"\n ballots = [CiphertextAcceptedBallot.from_json_object(ballot) for ballot in\n request.encrypted_ballots]\n context = CiphertextElectionContext.from_json_object(request.context)\n guardian = convert_guardian(request.guardian)\n shares = [compute_decryption_share_for_ballot(guardian, ballot, context,\n scheduler) for ballot in ballots]\n response = DecryptBallotSharesResponse(shares=[write_json_object(share) for\n share in shares])\n return response\n",
"step-3": "<mask token>\nrouter = APIRouter()\n\n\[email protected]('/decrypt-shares', tags=[TALLY])\ndef decrypt_ballot_shares(request: DecryptBallotSharesRequest=Body(...),\n scheduler: Scheduler=Depends(get_scheduler)) ->Any:\n \"\"\"\n Decrypt this guardian's share of one or more ballots\n \"\"\"\n ballots = [CiphertextAcceptedBallot.from_json_object(ballot) for ballot in\n request.encrypted_ballots]\n context = CiphertextElectionContext.from_json_object(request.context)\n guardian = convert_guardian(request.guardian)\n shares = [compute_decryption_share_for_ballot(guardian, ballot, context,\n scheduler) for ballot in ballots]\n response = DecryptBallotSharesResponse(shares=[write_json_object(share) for\n share in shares])\n return response\n",
"step-4": "from typing import Any\nfrom electionguard.ballot import CiphertextAcceptedBallot\nfrom electionguard.decryption import compute_decryption_share_for_ballot\nfrom electionguard.election import CiphertextElectionContext\nfrom electionguard.scheduler import Scheduler\nfrom electionguard.serializable import write_json_object\nfrom fastapi import APIRouter, Body, Depends\nfrom app.core.scheduler import get_scheduler\nfrom ..models import convert_guardian, DecryptBallotSharesRequest, DecryptBallotSharesResponse\nfrom ..tags import TALLY\nrouter = APIRouter()\n\n\[email protected]('/decrypt-shares', tags=[TALLY])\ndef decrypt_ballot_shares(request: DecryptBallotSharesRequest=Body(...),\n scheduler: Scheduler=Depends(get_scheduler)) ->Any:\n \"\"\"\n Decrypt this guardian's share of one or more ballots\n \"\"\"\n ballots = [CiphertextAcceptedBallot.from_json_object(ballot) for ballot in\n request.encrypted_ballots]\n context = CiphertextElectionContext.from_json_object(request.context)\n guardian = convert_guardian(request.guardian)\n shares = [compute_decryption_share_for_ballot(guardian, ballot, context,\n scheduler) for ballot in ballots]\n response = DecryptBallotSharesResponse(shares=[write_json_object(share) for\n share in shares])\n return response\n",
"step-5": "from typing import Any\nfrom electionguard.ballot import CiphertextAcceptedBallot\nfrom electionguard.decryption import compute_decryption_share_for_ballot\nfrom electionguard.election import CiphertextElectionContext\nfrom electionguard.scheduler import Scheduler\nfrom electionguard.serializable import write_json_object\nfrom fastapi import APIRouter, Body, Depends\n\nfrom app.core.scheduler import get_scheduler\nfrom ..models import (\n convert_guardian,\n DecryptBallotSharesRequest,\n DecryptBallotSharesResponse,\n)\nfrom ..tags import TALLY\n\nrouter = APIRouter()\n\n\[email protected](\"/decrypt-shares\", tags=[TALLY])\ndef decrypt_ballot_shares(\n request: DecryptBallotSharesRequest = Body(...),\n scheduler: Scheduler = Depends(get_scheduler),\n) -> Any:\n \"\"\"\n Decrypt this guardian's share of one or more ballots\n \"\"\"\n ballots = [\n CiphertextAcceptedBallot.from_json_object(ballot)\n for ballot in request.encrypted_ballots\n ]\n context = CiphertextElectionContext.from_json_object(request.context)\n guardian = convert_guardian(request.guardian)\n\n shares = [\n compute_decryption_share_for_ballot(guardian, ballot, context, scheduler)\n for ballot in ballots\n ]\n\n response = DecryptBallotSharesResponse(\n shares=[write_json_object(share) for share in shares]\n )\n\n return response\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import socket
from time import time, sleep
from threading import Thread
# Define drone
class dm107s():
# Default control value
def __init__(self):
# 4 values for flight
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
# 0 - normal mode, 2 - emergency stop, 4 - gyroscope calibration
self.commands = 0
# Required for wifi control
self.onoff = 1
# Prevent multiple takeoff button presses
self._takeoff_flag = False
# Prevent multiple calibrate button presses
self._calibrate_flag = False
# Connect to UDP port
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
#self.sess.connect(('192.168.100.1', 19798))
# Initialize timer value
self._takeoff_timer = 0
self._calibrate_timer = 0
# Flag to stop thread
self._stopped = False
# Start separated thread for drone control
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
# Get command hex for drone
def get_hex(self):
# XOR is for checksum
self.command_out=((26122<<144)|self.roll<<136|self.pitch<<128|self.throttle<<120|self.yaw<<112|self.commands<<104|self.onoff*2<<96|65535<<80|(self.roll^self.pitch^self.throttle^self.yaw^self.commands^(self.onoff*2))<<8|153)
self.command_out = hex(self.command_out)[2::]
return self.command_out
# Turn hex to byte package
def _get_packet(self):
self._hex_code = self.get_hex()
self.package = bytes.fromhex(self._hex_code)
return self.package
# Send control to drone
def send_ctrl(self):
while not self._stopped:
self._package = self._get_packet()
#self.sess.send(self._package)
self.sess.sendto(self._package, ('192.168.100.1', 19798))
self.Flag_off()
sleep(0.02)
# Close connection to drone
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
# Return to default
def default(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 0
self.onoff = 1
self._takeoff_flag = False
# Increment control
def incremt(self, rl, pt, th, yw):
self._value_to_change = [128, 128, 128, 128]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 255:
self._value_to_change[x] = 255
[self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change
# Roll right
def roll_right(self):
self.roll += 20
if self.roll > 248:
self.roll = 248
# Pitch forward
def pitch_fwd(self):
self.pitch += 20
if self.pitch > 248:
self.pitch = 248
# Increase throttle
def throttle_up(self):
self.throttle += 20
if self.throttle > 248:
self.throttle = 248
# Yaw right
def yaw_right(self):
self.yaw -= 20
if self.yaw < 18:
self.yaw = 18
# Roll left
def roll_left(self):
self.roll -= 20
if self.roll < 18:
self.roll = 18
# Pitch backward
def pitch_bwd(self):
self.pitch -= 20
if self.pitch < 18:
self.pitch = 18
# Decrease throttle
def throttle_dwn(self):
self.throttle -= 20
if self.throttle < 18:
self.throttle = 18
# Yaw left
def yaw_left(self):
self.yaw += 20
if self.yaw > 248:
self.yaw = 248
# Takeoff
def takeoff(self):
if self._takeoff_flag == False:
self.commands = 1
self._takeoff_flag = True
self._takeoff_timer = time()
    # Landing (this protocol appears to reuse the same toggle command as takeoff)
def land(self):
if self._takeoff_flag == False:
self.commands = 1
self._takeoff_flag = True
self._takeoff_timer = time()
# Flip takeoff flag
def Flag_off(self):
if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 1)):
self.commands = 0
self._takeoff_flag = False
if (self._calibrate_flag == True and (time() - self._calibrate_timer >= 3)):
self.commands = 0
self.onoff = 1
self._calibrate_flag = False
# Stop IMMEDIATELY
def emergency_stop(self):
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 2
self.onoff = 1
self._takeoff_flag = False
# Calibrate gyroscope
def calib_gyro(self):
if self._calibrate_flag == False:
self.roll = 128
self.pitch = 128
self.throttle = 128
self.yaw = 128
self.commands = 4
self.onoff = 0
self._calibrate_flag = True
self._calibrate_timer = time()
class naza():
# Default control value
def __init__(self, ip, port):
# 4 values for flight
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
# Prevent multiple takeoff button presses
self._takeoff_flag = False
# Prevent multiple ignite button presses
self._ignite_flag = False
self._ignite_send = False
# Connect to UDP port
self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
self.ip = ip
self.port = port
#self.sess.connect((ip, port))
# Initialize timer value
self._ignite_timer = 0
self._takeoff_timer = 0
# Flag to stop thread
self._stopped = False
# Start separated thread for drone control
def start(self):
self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)
self._thread.start()
return self
# Get command hex for drone
def get_hex(self):
# XOR is for checksum
self.command_out=(self.throttle<<12|self.yaw<<8|self.pitch<<4|self.roll)
self.command_out = hex(self.command_out)[2::]
return self.command_out
# Send control to drone
def send_ctrl(self):
while not self._stopped:
if self._ignite_send == True:
ignite_msg = 'st'
self._package = ignite_msg.encode()
else:
self._package = self.get_hex().encode()
#self.sess.send(self._package)
self.sess.sendto(self._package, (self.ip, self.port))
self.Flag_off()
sleep(0.05)
# Close connection to drone
def close_connection(self):
self._stopped = True
if self._thread.daemon == False:
self._thread.join()
self.sess.close()
# Return to default
def default(self):
self.roll = 8
self.pitch = 8
self.throttle = 8
self.yaw = 8
self._takeoff_flag = False
self._ignite_flag = False
# Increment control
def incremt(self, rl, pt, th, yw):
self._value_to_change = [8, 8, 8, 8]
self._change_val = [rl, pt, th, yw]
for x in range(len(self._value_to_change)):
self._value_to_change[x] += self._change_val[x]
if self._value_to_change[x] <= 0:
self._value_to_change[x] = 0
if self._value_to_change[x] >= 15:
self._value_to_change[x] = 15
[self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change
# Roll right
def roll_right(self):
if self.roll < 15:
self.roll += 1
# Pitch forward
def pitch_fwd(self):
if self.pitch < 15:
self.pitch += 1
# Increase throttle
def throttle_up(self):
if self.throttle < 15:
self.throttle += 1
# Yaw right
def yaw_right(self):
if self.yaw < 15:
self.yaw += 1
# Roll left
def roll_left(self):
if self.roll > 0:
self.roll -= 1
# Pitch backward
def pitch_bwd(self):
if self.pitch > 0:
self.pitch -= 1
# Decrease throttle
def throttle_dwn(self):
if self.throttle > 0:
self.throttle -= 1
# Yaw left
def yaw_left(self):
if self.yaw > 0:
self.yaw -= 1
# Start engine
def ignite(self):
if self._ignite_flag == False:
self._ignite_flag = True
self._ignite_send = True
self._ignite_timer = time()
# Takeoff
def takeoff(self):
if self._takeoff_flag == False:
self.throttle = 12
self._takeoff_flag = True
self._takeoff_timer = time()
# Flip takeoff flag
def Flag_off(self):
if self._ignite_flag == True:
if (time() - self._ignite_timer >= 1) and (time() - self._ignite_timer < 1.5):
self._ignite_send = False
self.roll = 8
self.pitch = 8
self.yaw = 8
self.throttle = 0
# Warming up engine
if (time() - self._ignite_timer >= 1.5) and (time() - self._ignite_timer < 2):
self.throttle = 2
if (time() - self._ignite_timer >= 2) and (time() - self._ignite_timer < 2.5):
self.throttle = 4
if (time() - self._ignite_timer >= 2.5) and (time() - self._ignite_timer < 3):
self.throttle = 6
if (time() - self._ignite_timer >= 3) and (time() - self._ignite_timer < 4):
self.throttle = 8
# After starting engine, takeoff after 4s
if (time() - self._ignite_timer >= 4):
self._ignite_flag = False
self.takeoff()
if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 4)):
self.throttle = 8
self._takeoff_flag = False
|
normal
|
{
"blob_id": "ee8e117db0348aa37d6aa37e6c06255101f1cff4",
"index": 2752,
"step-1": "<mask token>\n\n\nclass dm107s:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = 
time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-2": "<mask token>\n\n\nclass dm107s:\n <mask token>\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n <mask token>\n\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n <mask token>\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n 
self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-3": "<mask token>\n\n\nclass dm107s:\n <mask token>\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n <mask token>\n\n def _get_packet(self):\n self._hex_code = self.get_hex()\n self.package = bytes.fromhex(self._hex_code)\n return self.package\n <mask token>\n <mask token>\n\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n\n def pitch_fwd(self):\n self.pitch += 20\n if self.pitch > 248:\n self.pitch = 248\n\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n\n def roll_left(self):\n self.roll -= 20\n if self.roll < 18:\n self.roll = 18\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n <mask token>\n\n def land(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n <mask token>\n <mask token>\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if 
self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-4": "<mask token>\n\n\nclass dm107s:\n\n def __init__(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n self._calibrate_flag = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self._takeoff_timer = 0\n self._calibrate_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (26122 << 144 | self.roll << 136 | self.pitch <<\n 128 | self.throttle << 120 | self.yaw << 112 | self.commands <<\n 104 | self.onoff * 2 << 96 | 65535 << 80 | (self.roll ^ self.\n pitch ^ self.throttle ^ self.yaw ^ self.commands ^ self.onoff *\n 2) << 8 | 153)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def _get_packet(self):\n self._hex_code = self.get_hex()\n self.package = bytes.fromhex(self._hex_code)\n return self.package\n\n def send_ctrl(self):\n while not self._stopped:\n self._package = self._get_packet()\n self.sess.sendto(self._package, ('192.168.100.1', 19798))\n self.Flag_off()\n sleep(0.02)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n\n def pitch_fwd(self):\n self.pitch += 20\n if self.pitch > 248:\n self.pitch = 248\n\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n\n def roll_left(self):\n self.roll -= 20\n if self.roll < 18:\n self.roll = 18\n <mask token>\n\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def land(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n <mask token>\n\n def emergency_stop(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 2\n self.onoff = 1\n self._takeoff_flag = False\n\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\n\nclass naza:\n\n def __init__(self, ip, port):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n self._ignite_send = False\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n 
self._ignite_timer = 0\n self._takeoff_timer = 0\n self._stopped = False\n\n def start(self):\n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n\n def get_hex(self):\n self.command_out = (self.throttle << 12 | self.yaw << 8 | self.\n pitch << 4 | self.roll)\n self.command_out = hex(self.command_out)[2:]\n return self.command_out\n\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw\n ] = self._value_to_change\n\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n\n def pitch_bwd(self):\n if self.pitch > 0:\n self.pitch -= 1\n\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n\n def Flag_off(self):\n if self._ignite_flag == True:\n if time() - self._ignite_timer >= 1 and time(\n ) - self._ignite_timer < 1.5:\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n if time() - self._ignite_timer >= 1.5 and time(\n ) - self._ignite_timer < 2:\n self.throttle = 2\n if time() - self._ignite_timer >= 2 and time(\n ) - self._ignite_timer < 2.5:\n self.throttle = 4\n if time() - self._ignite_timer >= 2.5 and time(\n ) - self._ignite_timer < 3:\n self.throttle = 6\n if time() - self._ignite_timer >= 3 and time(\n ) - self._ignite_timer < 4:\n self.throttle = 8\n if time() - self._ignite_timer >= 4:\n self._ignite_flag = False\n self.takeoff()\n if self._takeoff_flag == True and time() - self._takeoff_timer >= 4:\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-5": "import socket\nfrom time import time, sleep\nfrom threading import Thread\n\n# Define drone\nclass dm107s():\n # Default control value\n def __init__(self):\n # 4 values for flight\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n # 0 - normal mode, 2 - emergency stop, 4 - gyroscope calibration\n self.commands = 0\n # Required for wifi control\n self.onoff = 1\n # Prevent multiple takeoff button presses\n self._takeoff_flag = False\n # Prevent multiple calibrate button presses\n self._calibrate_flag = False\n # Connect to UDP port\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n #self.sess.connect(('192.168.100.1', 19798))\n # Initialize timer value\n self._takeoff_timer = 0\n self._calibrate_timer = 0\n # Flag to stop thread\n self._stopped = False\n \n # Start separated thread for drone control\n def start(self): \n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n \n # Get command hex for drone\n def get_hex(self):\n # XOR is for checksum\n self.command_out=((26122<<144)|self.roll<<136|self.pitch<<128|self.throttle<<120|self.yaw<<112|self.commands<<104|self.onoff*2<<96|65535<<80|(self.roll^self.pitch^self.throttle^self.yaw^self.commands^(self.onoff*2))<<8|153)\n self.command_out = hex(self.command_out)[2::]\n return self.command_out\n \n # Turn hex to byte package\n def _get_packet(self):\n self._hex_code = self.get_hex()\n self.package = bytes.fromhex(self._hex_code)\n return self.package\n \n # Send control to drone\n def send_ctrl(self):\n while not self._stopped:\n self._package = self._get_packet()\n #self.sess.send(self._package)\n self.sess.sendto(self._package, ('192.168.100.1', 19798))\n self.Flag_off()\n sleep(0.02)\n \n # Close connection to drone\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n \n # Return to default\n def default(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 0\n self.onoff = 1\n self._takeoff_flag = False\n \n # Increment control\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [128, 128, 128, 128]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 255:\n self._value_to_change[x] = 255\n [self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change\n \n # Roll right\n def roll_right(self):\n self.roll += 20\n if self.roll > 248:\n self.roll = 248\n \n # Pitch forward\n def pitch_fwd(self):\n self.pitch += 20\n if self.pitch > 248:\n self.pitch = 248\n \n # Increase throttle\n def throttle_up(self):\n self.throttle += 20\n if self.throttle > 248:\n self.throttle = 248\n \n # Yaw right\n def yaw_right(self):\n self.yaw -= 20\n if self.yaw < 18:\n self.yaw = 18\n \n # Roll left\n def roll_left(self):\n self.roll -= 20\n if self.roll < 18:\n self.roll = 18\n \n # Pitch backward\n def pitch_bwd(self):\n self.pitch -= 20\n if self.pitch < 18:\n self.pitch = 18\n \n # Decrease throttle\n def throttle_dwn(self):\n self.throttle -= 20\n if self.throttle < 18:\n self.throttle = 18\n \n # Yaw left\n def yaw_left(self):\n self.yaw += 20\n if self.yaw > 248:\n self.yaw = 248\n \n # Takeoff\n def takeoff(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = 
time()\n \n # Landing\n def land(self):\n if self._takeoff_flag == False:\n self.commands = 1\n self._takeoff_flag = True\n self._takeoff_timer = time()\n \n # Flip takeoff flag\n def Flag_off(self):\n if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 1)):\n self.commands = 0\n self._takeoff_flag = False\n if (self._calibrate_flag == True and (time() - self._calibrate_timer >= 3)):\n self.commands = 0\n self.onoff = 1\n self._calibrate_flag = False\n\n # Stop IMMEDIATELY\n def emergency_stop(self):\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 2\n self.onoff = 1\n self._takeoff_flag = False\n \n # Calibrate gyroscope\n def calib_gyro(self):\n if self._calibrate_flag == False:\n self.roll = 128\n self.pitch = 128\n self.throttle = 128\n self.yaw = 128\n self.commands = 4\n self.onoff = 0\n self._calibrate_flag = True\n self._calibrate_timer = time()\n\nclass naza():\n # Default control value\n def __init__(self, ip, port):\n # 4 values for flight\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n # Prevent multiple takeoff button presses\n self._takeoff_flag = False\n # Prevent multiple ignite button presses\n self._ignite_flag = False\n self._ignite_send = False\n # Connect to UDP port\n self.sess = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)\n self.ip = ip\n self.port = port\n #self.sess.connect((ip, port))\n # Initialize timer value\n self._ignite_timer = 0\n self._takeoff_timer = 0\n # Flag to stop thread\n self._stopped = False\n \n # Start separated thread for drone control\n def start(self): \n self._thread = Thread(target=self.send_ctrl, args=(), daemon=True)\n self._thread.start()\n return self\n \n # Get command hex for drone\n def get_hex(self):\n # XOR is for checksum\n self.command_out=(self.throttle<<12|self.yaw<<8|self.pitch<<4|self.roll)\n self.command_out = hex(self.command_out)[2::]\n return self.command_out\n \n # Send control to drone\n def send_ctrl(self):\n while not self._stopped:\n if self._ignite_send == True:\n ignite_msg = 'st'\n self._package = ignite_msg.encode()\n else:\n self._package = self.get_hex().encode()\n #self.sess.send(self._package)\n self.sess.sendto(self._package, (self.ip, self.port))\n self.Flag_off()\n sleep(0.05)\n \n # Close connection to drone\n def close_connection(self):\n self._stopped = True\n if self._thread.daemon == False:\n self._thread.join()\n self.sess.close()\n \n # Return to default\n def default(self):\n self.roll = 8\n self.pitch = 8\n self.throttle = 8\n self.yaw = 8\n self._takeoff_flag = False\n self._ignite_flag = False\n \n # Increment control\n def incremt(self, rl, pt, th, yw):\n self._value_to_change = [8, 8, 8, 8]\n self._change_val = [rl, pt, th, yw]\n for x in range(len(self._value_to_change)):\n self._value_to_change[x] += self._change_val[x]\n if self._value_to_change[x] <= 0:\n self._value_to_change[x] = 0\n if self._value_to_change[x] >= 15:\n self._value_to_change[x] = 15\n [self.roll, self.pitch, self.throttle, self.yaw] = self._value_to_change\n \n # Roll right\n def roll_right(self):\n if self.roll < 15:\n self.roll += 1\n \n # Pitch forward\n def pitch_fwd(self):\n if self.pitch < 15:\n self.pitch += 1\n \n # Increase throttle\n def throttle_up(self):\n if self.throttle < 15:\n self.throttle += 1\n \n # Yaw right\n def yaw_right(self):\n if self.yaw < 15:\n self.yaw += 1\n \n # Roll left\n def roll_left(self):\n if self.roll > 0:\n self.roll -= 1\n \n # Pitch backward\n def pitch_bwd(self):\n if self.pitch > 0:\n 
self.pitch -= 1\n \n # Decrease throttle\n def throttle_dwn(self):\n if self.throttle > 0:\n self.throttle -= 1\n \n # Yaw left\n def yaw_left(self):\n if self.yaw > 0:\n self.yaw -= 1\n \n # Start engine\n def ignite(self):\n if self._ignite_flag == False:\n self._ignite_flag = True\n self._ignite_send = True\n self._ignite_timer = time()\n \n # Takeoff\n def takeoff(self):\n if self._takeoff_flag == False:\n self.throttle = 12\n self._takeoff_flag = True\n self._takeoff_timer = time()\n \n # Flip takeoff flag\n def Flag_off(self):\n if self._ignite_flag == True:\n if (time() - self._ignite_timer >= 1) and (time() - self._ignite_timer < 1.5):\n self._ignite_send = False\n self.roll = 8\n self.pitch = 8\n self.yaw = 8\n self.throttle = 0\n # Warming up engine\n if (time() - self._ignite_timer >= 1.5) and (time() - self._ignite_timer < 2):\n self.throttle = 2\n if (time() - self._ignite_timer >= 2) and (time() - self._ignite_timer < 2.5):\n self.throttle = 4\n if (time() - self._ignite_timer >= 2.5) and (time() - self._ignite_timer < 3):\n self.throttle = 6\n if (time() - self._ignite_timer >= 3) and (time() - self._ignite_timer < 4):\n self.throttle = 8\n # After starting engine, takeoff after 4s\n if (time() - self._ignite_timer >= 4):\n self._ignite_flag = False\n self.takeoff()\n if (self._takeoff_flag == True and (time() - self._takeoff_timer >= 4)):\n self.throttle = 8\n self._takeoff_flag = False\n",
"step-ids": [
23,
29,
33,
39,
43
]
}
|
[
23,
29,
33,
39,
43
] |
from django.views.generic import ListView
class ExperimentList(ListView):
pass
|
normal
|
{
"blob_id": "10990282c8aa0b9b26a69e451132ff37257acbc6",
"index": 3331,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ExperimentList(ListView):\n pass\n",
"step-3": "from django.views.generic import ListView\n\n\nclass ExperimentList(ListView):\n pass\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Preprocess data obtained for training
Cora and Citeseer datasets are supported by our example, the original versions of these datasets are as follows:
@inproceedings{nr,
title={The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle={AAAI},
url={http://networkrepository.com},
year={2015}
}
In this example, we use dataset splits provided by https://github.com/kimiyoung/planetoid (Zhilin Yang, William W. Cohen, Ruslan Salakhutdinov, [Revisiting Semi-Supervised Learning with Graph Embeddings](https://arxiv.org/abs/1603.08861), ICML 2016).
"""
import numpy as np
import mindspore.dataset as ds
def adj_to_bias(adj):
"""Add self loop to adj and make sure only one hop neighbors are engaged in computing"""
num_graphs = adj.shape[0]
adj_temp = np.empty(adj.shape)
for i in range(num_graphs):
adj_temp[i] = adj[i] + np.eye(adj.shape[1])
return -1e9 * (1.0 - adj_temp)
def get_biases_features_labels(data_dir):
"""Get biases, features, labels from Dataset"""
g = ds.GraphData(data_dir)
nodes = g.get_all_nodes(0)
nodes_list = nodes.tolist()
row_tensor = g.get_node_feature(nodes_list, [1, 2])
features = row_tensor[0]
features = features[np.newaxis]
labels = row_tensor[1]
nodes_num = labels.shape[0]
class_num = labels.max() + 1
labels_onehot = np.eye(nodes_num, class_num)[labels].astype(np.float32)
neighbor = g.get_all_neighbors(nodes_list, 0)
node_map = {node_id: index for index, node_id in enumerate(nodes_list)}
adj = np.zeros([nodes_num, nodes_num], dtype=np.float32)
for index, value in np.ndenumerate(neighbor):
if value >= 0 and index[1] > 0:
adj[node_map[neighbor[index[0], 0]], node_map[value]] = 1
adj = adj[np.newaxis]
biases = adj_to_bias(adj)
return biases, features, labels_onehot
def get_mask(total, begin, end):
"""Generate mask according to begin and end position"""
mask = np.zeros([total]).astype(np.float32)
mask[begin:end] = 1
    return np.array(mask, dtype=bool)
def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):
"""Load cora dataset and preprocessing"""
biases, feature, label = get_biases_features_labels(data_dir)
# split training, validation and testing set
nodes_num = label.shape[0]
train_mask = get_mask(nodes_num, 0, train_node_num)
eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)
test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)
y_train = np.zeros(label.shape)
y_val = np.zeros(label.shape)
y_test = np.zeros(label.shape)
y_train[train_mask, :] = label[train_mask, :]
y_val[eval_mask, :] = label[eval_mask, :]
y_test[test_mask, :] = label[test_mask, :]
y_train = y_train[np.newaxis]
y_val = y_val[np.newaxis]
y_test = y_test[np.newaxis]
train_mask = train_mask[np.newaxis]
eval_mask = eval_mask[np.newaxis]
test_mask = test_mask[np.newaxis]
return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask
|
normal
|
{
"blob_id": "eb50f50e3c072c2f6e74ff9ef8c2fa2eef782aae",
"index": 6718,
"step-1": "<mask token>\n\n\ndef adj_to_bias(adj):\n \"\"\"Add self loop to adj and make sure only one hop neighbors are engaged in computing\"\"\"\n num_graphs = adj.shape[0]\n adj_temp = np.empty(adj.shape)\n for i in range(num_graphs):\n adj_temp[i] = adj[i] + np.eye(adj.shape[1])\n return -1000000000.0 * (1.0 - adj_temp)\n\n\n<mask token>\n\n\ndef get_mask(total, begin, end):\n \"\"\"Generate mask according to begin and end position\"\"\"\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return np.array(mask, dtype=np.bool)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef adj_to_bias(adj):\n \"\"\"Add self loop to adj and make sure only one hop neighbors are engaged in computing\"\"\"\n num_graphs = adj.shape[0]\n adj_temp = np.empty(adj.shape)\n for i in range(num_graphs):\n adj_temp[i] = adj[i] + np.eye(adj.shape[1])\n return -1000000000.0 * (1.0 - adj_temp)\n\n\ndef get_biases_features_labels(data_dir):\n \"\"\"Get biases, features, labels from Dataset\"\"\"\n g = ds.GraphData(data_dir)\n nodes = g.get_all_nodes(0)\n nodes_list = nodes.tolist()\n row_tensor = g.get_node_feature(nodes_list, [1, 2])\n features = row_tensor[0]\n features = features[np.newaxis]\n labels = row_tensor[1]\n nodes_num = labels.shape[0]\n class_num = labels.max() + 1\n labels_onehot = np.eye(nodes_num, class_num)[labels].astype(np.float32)\n neighbor = g.get_all_neighbors(nodes_list, 0)\n node_map = {node_id: index for index, node_id in enumerate(nodes_list)}\n adj = np.zeros([nodes_num, nodes_num], dtype=np.float32)\n for index, value in np.ndenumerate(neighbor):\n if value >= 0 and index[1] > 0:\n adj[node_map[neighbor[index[0], 0]], node_map[value]] = 1\n adj = adj[np.newaxis]\n biases = adj_to_bias(adj)\n return biases, features, labels_onehot\n\n\ndef get_mask(total, begin, end):\n \"\"\"Generate mask according to begin and end position\"\"\"\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return np.array(mask, dtype=np.bool)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef adj_to_bias(adj):\n \"\"\"Add self loop to adj and make sure only one hop neighbors are engaged in computing\"\"\"\n num_graphs = adj.shape[0]\n adj_temp = np.empty(adj.shape)\n for i in range(num_graphs):\n adj_temp[i] = adj[i] + np.eye(adj.shape[1])\n return -1000000000.0 * (1.0 - adj_temp)\n\n\ndef get_biases_features_labels(data_dir):\n \"\"\"Get biases, features, labels from Dataset\"\"\"\n g = ds.GraphData(data_dir)\n nodes = g.get_all_nodes(0)\n nodes_list = nodes.tolist()\n row_tensor = g.get_node_feature(nodes_list, [1, 2])\n features = row_tensor[0]\n features = features[np.newaxis]\n labels = row_tensor[1]\n nodes_num = labels.shape[0]\n class_num = labels.max() + 1\n labels_onehot = np.eye(nodes_num, class_num)[labels].astype(np.float32)\n neighbor = g.get_all_neighbors(nodes_list, 0)\n node_map = {node_id: index for index, node_id in enumerate(nodes_list)}\n adj = np.zeros([nodes_num, nodes_num], dtype=np.float32)\n for index, value in np.ndenumerate(neighbor):\n if value >= 0 and index[1] > 0:\n adj[node_map[neighbor[index[0], 0]], node_map[value]] = 1\n adj = adj[np.newaxis]\n biases = adj_to_bias(adj)\n return biases, features, labels_onehot\n\n\ndef get_mask(total, begin, end):\n \"\"\"Generate mask according to begin and end position\"\"\"\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return np.array(mask, dtype=np.bool)\n\n\ndef load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n \"\"\"Load cora dataset and preprocessing\"\"\"\n biases, feature, label = get_biases_features_labels(data_dir)\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num +\n eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n return (feature, biases, y_train, train_mask, y_val, eval_mask, y_test,\n test_mask)\n",
"step-4": "<mask token>\nimport numpy as np\nimport mindspore.dataset as ds\n\n\ndef adj_to_bias(adj):\n \"\"\"Add self loop to adj and make sure only one hop neighbors are engaged in computing\"\"\"\n num_graphs = adj.shape[0]\n adj_temp = np.empty(adj.shape)\n for i in range(num_graphs):\n adj_temp[i] = adj[i] + np.eye(adj.shape[1])\n return -1000000000.0 * (1.0 - adj_temp)\n\n\ndef get_biases_features_labels(data_dir):\n \"\"\"Get biases, features, labels from Dataset\"\"\"\n g = ds.GraphData(data_dir)\n nodes = g.get_all_nodes(0)\n nodes_list = nodes.tolist()\n row_tensor = g.get_node_feature(nodes_list, [1, 2])\n features = row_tensor[0]\n features = features[np.newaxis]\n labels = row_tensor[1]\n nodes_num = labels.shape[0]\n class_num = labels.max() + 1\n labels_onehot = np.eye(nodes_num, class_num)[labels].astype(np.float32)\n neighbor = g.get_all_neighbors(nodes_list, 0)\n node_map = {node_id: index for index, node_id in enumerate(nodes_list)}\n adj = np.zeros([nodes_num, nodes_num], dtype=np.float32)\n for index, value in np.ndenumerate(neighbor):\n if value >= 0 and index[1] > 0:\n adj[node_map[neighbor[index[0], 0]], node_map[value]] = 1\n adj = adj[np.newaxis]\n biases = adj_to_bias(adj)\n return biases, features, labels_onehot\n\n\ndef get_mask(total, begin, end):\n \"\"\"Generate mask according to begin and end position\"\"\"\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return np.array(mask, dtype=np.bool)\n\n\ndef load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n \"\"\"Load cora dataset and preprocessing\"\"\"\n biases, feature, label = get_biases_features_labels(data_dir)\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num +\n eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n y_train = y_train[np.newaxis]\n y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n return (feature, biases, y_train, train_mask, y_val, eval_mask, y_test,\n test_mask)\n",
"step-5": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nPreprocess data obtained for training\nCora and Citeseer datasets are supported by our example, the original versions of these datasets are as follows:\n@inproceedings{nr,\n title={The Network Data Repository with Interactive Graph Analytics and Visualization},\n author={Ryan A. Rossi and Nesreen K. Ahmed},\n booktitle={AAAI},\n url={http://networkrepository.com},\n year={2015}\n}\nIn this example, we use dataset splits provided by https://github.com/kimiyoung/planetoid (Zhilin Yang, William W. Cohen, Ruslan Salakhutdinov, [Revisiting Semi-Supervised Learning with Graph Embeddings](https://arxiv.org/abs/1603.08861), ICML 2016).\n\"\"\"\nimport numpy as np\nimport mindspore.dataset as ds\n\n\ndef adj_to_bias(adj):\n \"\"\"Add self loop to adj and make sure only one hop neighbors are engaged in computing\"\"\"\n num_graphs = adj.shape[0]\n adj_temp = np.empty(adj.shape)\n for i in range(num_graphs):\n adj_temp[i] = adj[i] + np.eye(adj.shape[1])\n return -1e9 * (1.0 - adj_temp)\n\n\ndef get_biases_features_labels(data_dir):\n \"\"\"Get biases, features, labels from Dataset\"\"\"\n g = ds.GraphData(data_dir)\n nodes = g.get_all_nodes(0)\n nodes_list = nodes.tolist()\n row_tensor = g.get_node_feature(nodes_list, [1, 2])\n features = row_tensor[0]\n features = features[np.newaxis]\n\n labels = row_tensor[1]\n\n nodes_num = labels.shape[0]\n class_num = labels.max() + 1\n labels_onehot = np.eye(nodes_num, class_num)[labels].astype(np.float32)\n\n neighbor = g.get_all_neighbors(nodes_list, 0)\n node_map = {node_id: index for index, node_id in enumerate(nodes_list)}\n adj = np.zeros([nodes_num, nodes_num], dtype=np.float32)\n for index, value in np.ndenumerate(neighbor):\n if value >= 0 and index[1] > 0:\n adj[node_map[neighbor[index[0], 0]], node_map[value]] = 1\n adj = adj[np.newaxis]\n biases = adj_to_bias(adj)\n\n return biases, features, labels_onehot\n\n\ndef get_mask(total, begin, end):\n \"\"\"Generate mask according to begin and end position\"\"\"\n mask = np.zeros([total]).astype(np.float32)\n mask[begin:end] = 1\n return np.array(mask, dtype=np.bool)\n\n\ndef load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):\n \"\"\"Load cora dataset and preprocessing\"\"\"\n biases, feature, label = get_biases_features_labels(data_dir)\n # split training, validation and testing set\n nodes_num = label.shape[0]\n train_mask = get_mask(nodes_num, 0, train_node_num)\n eval_mask = get_mask(nodes_num, train_node_num, train_node_num + eval_node_num)\n test_mask = get_mask(nodes_num, nodes_num - test_node_num, nodes_num)\n\n y_train = np.zeros(label.shape)\n y_val = np.zeros(label.shape)\n y_test = np.zeros(label.shape)\n\n y_train[train_mask, :] = label[train_mask, :]\n y_val[eval_mask, :] = label[eval_mask, :]\n y_test[test_mask, :] = label[test_mask, :]\n\n y_train = y_train[np.newaxis]\n 
y_val = y_val[np.newaxis]\n y_test = y_test[np.newaxis]\n train_mask = train_mask[np.newaxis]\n eval_mask = eval_mask[np.newaxis]\n test_mask = test_mask[np.newaxis]\n\n return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django import forms
from .models import GetInTouch
class GetInTouchForm(forms.ModelForm):
class Meta:
model = GetInTouch
fields = '__all__'
|
normal
|
{
"blob_id": "c8dc143c09aa7f677167a4942ae1c4a0fbf75128",
"index": 3219,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GetInTouchForm(forms.ModelForm):\n\n\n class Meta:\n model = GetInTouch\n fields = '__all__'\n",
"step-3": "from django import forms\nfrom .models import GetInTouch\n\n\nclass GetInTouchForm(forms.ModelForm):\n\n\n class Meta:\n model = GetInTouch\n fields = '__all__'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import kubernetes.client
from kubernetes.client.rest import ApiException
from pprint import pprint
from kubeops_api.models.cluster import Cluster
class ClusterMonitor():
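    """Wrap a Kubernetes CoreV1Api client for one cluster, authenticated with the cluster's bearer token."""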
def __init__(self,cluster):
self.cluster = cluster
self.token = self.cluster.get_cluster_token()
self.cluster.change_to()
master = self.cluster.group_set.get(name='master').hosts.first()
configuration = kubernetes.client.Configuration()
configuration.api_key_prefix['authorization'] = 'Bearer'
configuration.api_key['authorization'] = self.token
print('---token----')
print(self.token)
configuration.debug = True
configuration.host = 'https://'+master.ip+":6443"
configuration.verify_ssl = False
print('https://'+master.ip+":6443")
self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient(configuration))
def list_pods(self):
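        """Return all pods across every namespace of the cluster."""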
pods = self.api_instance.list_pod_for_all_namespaces()
return pods
|
normal
|
{
"blob_id": "da41f26489c477e0df9735606457bd4ee4e5a396",
"index": 4465,
"step-1": "<mask token>\n\n\nclass ClusterMonitor:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ClusterMonitor:\n\n def __init__(self, cluster):\n self.cluster = cluster\n self.token = self.cluster.get_cluster_token()\n self.cluster.change_to()\n master = self.cluster.group_set.get(name='master').hosts.first()\n configuration = kubernetes.client.Configuration()\n configuration.api_key_prefix['authorization'] = 'Bearer'\n configuration.api_key['authorization'] = self.token\n print('---token----')\n print(self.token)\n configuration.debug = True\n configuration.host = 'https://' + master.ip + ':6443'\n configuration.verify_ssl = False\n print('https://' + master.ip + ':6443')\n self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.\n ApiClient(configuration))\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ClusterMonitor:\n\n def __init__(self, cluster):\n self.cluster = cluster\n self.token = self.cluster.get_cluster_token()\n self.cluster.change_to()\n master = self.cluster.group_set.get(name='master').hosts.first()\n configuration = kubernetes.client.Configuration()\n configuration.api_key_prefix['authorization'] = 'Bearer'\n configuration.api_key['authorization'] = self.token\n print('---token----')\n print(self.token)\n configuration.debug = True\n configuration.host = 'https://' + master.ip + ':6443'\n configuration.verify_ssl = False\n print('https://' + master.ip + ':6443')\n self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.\n ApiClient(configuration))\n\n def list_pods(self):\n pods = self.api_instance.list_pod_for_all_namespaces()\n return pods\n",
"step-4": "import kubernetes.client\nfrom kubernetes.client.rest import ApiException\nfrom pprint import pprint\nfrom kubeops_api.models.cluster import Cluster\n\n\nclass ClusterMonitor:\n\n def __init__(self, cluster):\n self.cluster = cluster\n self.token = self.cluster.get_cluster_token()\n self.cluster.change_to()\n master = self.cluster.group_set.get(name='master').hosts.first()\n configuration = kubernetes.client.Configuration()\n configuration.api_key_prefix['authorization'] = 'Bearer'\n configuration.api_key['authorization'] = self.token\n print('---token----')\n print(self.token)\n configuration.debug = True\n configuration.host = 'https://' + master.ip + ':6443'\n configuration.verify_ssl = False\n print('https://' + master.ip + ':6443')\n self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.\n ApiClient(configuration))\n\n def list_pods(self):\n pods = self.api_instance.list_pod_for_all_namespaces()\n return pods\n",
"step-5": "import kubernetes.client\nfrom kubernetes.client.rest import ApiException\nfrom pprint import pprint\nfrom kubeops_api.models.cluster import Cluster\n\nclass ClusterMonitor():\n\n def __init__(self,cluster):\n self.cluster = cluster\n self.token = self.cluster.get_cluster_token()\n self.cluster.change_to()\n master = self.cluster.group_set.get(name='master').hosts.first()\n configuration = kubernetes.client.Configuration()\n configuration.api_key_prefix['authorization'] = 'Bearer'\n configuration.api_key['authorization'] = self.token\n print('---token----')\n print(self.token)\n configuration.debug = True\n configuration.host = 'https://'+master.ip+\":6443\"\n configuration.verify_ssl = False\n print('https://'+master.ip+\":6443\")\n self.api_instance = kubernetes.client.CoreV1Api(kubernetes.client.ApiClient(configuration))\n\n def list_pods(self):\n pods = self.api_instance.list_pod_for_all_namespaces()\n return pods\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
import os
import subprocess
import logging
class color:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
# Recover all IPs for one component. Returns a list of IP strings.
def getHostsByKey(config, key):
hosts = config.get(key, "hosts").split(',')
index = 0
for host in hosts:
hosts[index] = host.strip(' \n')
index += 1
return hosts
# Return the IP of the current machine
def getIp():
ip = os.popen('ifconfig ens3 | grep "inet ad" | cut -f2 -d: | awk \'{print $1}\'', "r").read()
ip = ip.replace('\n', '')
return ip
# Check if the string is already present in the file
def isAlreadyAdd(pathFile, string):
file = open(pathFile)
for line in file:
if string in line:
return True
return False
def deleteLineWithString(pathFile, stringResearch):
contenu = ""
fichier = open(pathFile, "r")
for ligne in fichier:
if not (stringResearch in ligne):
contenu += ligne
fichier.close()
fichier = open('tmp.txt', 'w')
fichier.write(contenu)
fichier.close()
os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')
return
# Check whether a host answers to ping
def hostIsUp(host):
if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):
return False
return True
# Recover the IP of a server from its "<group>-<index>" name
def getIpServerName(config, serverName):
ip = ""
value = serverName.split('-')
if len(value) == 2:
try:
hosts = config.get(value[0], "hosts").split(',')
ip = hosts[int(value[1]) - 1].strip(' \n')
except:
return ip
return ip
# Update the script files on a specific server (tar, scp, untar)
def updateFileServer(config, serverName):
ip = getIpServerName(config, serverName)
out = subprocess.run(['tar', 'czf', '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'],
cwd=os.getcwd(),
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)
if out.returncode == 0:
logging.info("Compressing directory done [success]")
else:
logging.error("Compressing directory failed [error]")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'sudo rm -rf SDTD-Mazerunner/script/'])
out = subprocess.run(
['scp', '-pq', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz',
'xnet@' + ip + ':~/'], check=True)
if out.returncode == 0:
logging.info("Transfer done [success]")
else:
logging.error("Transferring files failed [error]")
logging.info("Detar file ...")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'mkdir -p SDTD-Mazerunner/script'])
out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])
if out.returncode == 0:
logging.info("Decompressing directory done [success]")
else:
logging.error("Decompressing directory failed [error]")
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'rm SDTD-Mazerunner-Script.tar.gz'])
return
# Install the basic environment on a specific server
def installEnvironmentServer(config, serverName):
ip = getIpServerName(config, serverName)
subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,
'source ~/.profile; ./script/install_config_machine.py'])
return
|
normal
|
{
"blob_id": "2c834c734de8f8740176bb5dbb6b123c49924718",
"index": 1697,
"step-1": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\n<mask token>\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\n<mask token>\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-2": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\n<mask token>\n\n\ndef getIp():\n ip = os.popen(\n 'ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'',\n 'r').read()\n ip = ip.replace('\\n', '')\n return ip\n\n\n<mask token>\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\ndef getIpServerName(config, serverName):\n ip = ''\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], 'hosts').split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-3": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\ndef getHostsByKey(config, key):\n hosts = config.get(key, 'hosts').split(',')\n index = 0\n for host in hosts:\n hosts[index] = host.strip(' \\n')\n index += 1\n return hosts\n\n\ndef getIp():\n ip = os.popen(\n 'ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'',\n 'r').read()\n ip = ip.replace('\\n', '')\n return ip\n\n\n<mask token>\n\n\ndef deleteLineWithString(pathFile, stringResearch):\n contenu = ''\n fichier = open(pathFile, 'r')\n for ligne in fichier:\n if not stringResearch in ligne:\n contenu += ligne\n fichier.close()\n fichier = open('tmp.txt', 'w')\n fichier.write(contenu)\n fichier.close()\n os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')\n return\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\ndef getIpServerName(config, serverName):\n ip = ''\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], 'hosts').split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-4": "<mask token>\n\n\nclass color:\n PURPLE = '\\x1b[95m'\n CYAN = '\\x1b[96m'\n DARKCYAN = '\\x1b[36m'\n BLUE = '\\x1b[94m'\n GREEN = '\\x1b[92m'\n YELLOW = '\\x1b[93m'\n RED = '\\x1b[91m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n END = '\\x1b[0m'\n\n\ndef getHostsByKey(config, key):\n hosts = config.get(key, 'hosts').split(',')\n index = 0\n for host in hosts:\n hosts[index] = host.strip(' \\n')\n index += 1\n return hosts\n\n\ndef getIp():\n ip = os.popen(\n 'ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'',\n 'r').read()\n ip = ip.replace('\\n', '')\n return ip\n\n\ndef isAlreadyAdd(pathFile, string):\n file = open(pathFile)\n for line in file:\n if string in line:\n return True\n return False\n\n\ndef deleteLineWithString(pathFile, stringResearch):\n contenu = ''\n fichier = open(pathFile, 'r')\n for ligne in fichier:\n if not stringResearch in ligne:\n contenu += ligne\n fichier.close()\n fichier = open('tmp.txt', 'w')\n fichier.write(contenu)\n fichier.close()\n os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')\n return\n\n\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\ndef getIpServerName(config, serverName):\n ip = ''\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], 'hosts').split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf',\n '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'], cwd=os.getcwd(), stdout\n =subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info('Compressing directory done [success]')\n else:\n logging.error('Compressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(['scp', '-pq', '-o', 'StrictHostKeyChecking=no',\n '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz', 'xnet@' +\n ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info('Transfer done [success]')\n else:\n logging.error('Transferring files failed [error]')\n logging.info('Detar file ...')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info('Decompressing directory done [success]')\n else:\n logging.error('Decompressing directory failed [error]')\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip, 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i',\n '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; ./script/install_config_machine.py'])\n return\n",
"step-5": "#!/usr/bin/env python3\n\nimport os\nimport subprocess\nimport logging\n\n\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\n\n# Recover all ip for one component. Return format ip\ndef getHostsByKey(config, key):\n hosts = config.get(key, \"hosts\").split(',')\n index = 0\n for host in hosts:\n hosts[index] = host.strip(' \\n')\n index += 1\n return hosts\n\n\n# Function who return the ip of the current machine\ndef getIp():\n ip = os.popen('ifconfig ens3 | grep \"inet ad\" | cut -f2 -d: | awk \\'{print $1}\\'', \"r\").read()\n ip = ip.replace('\\n', '')\n return ip\n\n\n# Check if String il already present in the file\ndef isAlreadyAdd(pathFile, string):\n file = open(pathFile)\n for line in file:\n if string in line:\n return True\n return False\n\n\ndef deleteLineWithString(pathFile, stringResearch):\n contenu = \"\"\n fichier = open(pathFile, \"r\")\n for ligne in fichier:\n if not (stringResearch in ligne):\n contenu += ligne\n fichier.close()\n\n fichier = open('tmp.txt', 'w')\n fichier.write(contenu)\n fichier.close()\n os.system('sudo mv tmp.txt /etc/hosts >> /dev/null 2>&1')\n return\n\n\n# Function for check host\ndef hostIsUp(host):\n if os.system('ping -c 1 ' + host + ' >> /dev/null 2>&1'):\n return False\n return True\n\n\n# Function for recover ip by using server name\ndef getIpServerName(config, serverName):\n ip = \"\"\n value = serverName.split('-')\n if len(value) == 2:\n try:\n hosts = config.get(value[0], \"hosts\").split(',')\n ip = hosts[int(value[1]) - 1].strip(' \\n')\n except:\n return ip\n return ip\n\n\n# Function for update file on specific server\ndef updateFileServer(config, serverName):\n ip = getIpServerName(config, serverName)\n out = subprocess.run(['tar', 'czf', '/tmp/SDTD-Mazerunner-Script.tar.gz', '.'],\n cwd=os.getcwd(),\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, check=True)\n if out.returncode == 0:\n logging.info(\"Compressing directory done [success]\")\n else:\n logging.error(\"Compressing directory failed [error]\")\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'sudo rm -rf SDTD-Mazerunner/script/'])\n out = subprocess.run(\n ['scp', '-pq', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', '/tmp/SDTD-Mazerunner-Script.tar.gz',\n 'xnet@' + ip + ':~/'], check=True)\n if out.returncode == 0:\n logging.info(\"Transfer done [success]\")\n else:\n logging.error(\"Transferring files failed [error]\")\n logging.info(\"Detar file ...\")\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'mkdir -p SDTD-Mazerunner/script'])\n out = subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'tar xzf SDTD-Mazerunner-Script.tar.gz -C SDTD-Mazerunner/script'])\n if out.returncode == 0:\n logging.info(\"Decompressing directory done [success]\")\n else:\n logging.error(\"Decompressing directory failed [error]\")\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'rm SDTD-Mazerunner-Script.tar.gz'])\n return\n\n\n# Function for install basic environment\ndef installEnvironmentServer(config, serverName):\n ip = getIpServerName(config, serverName)\n\n subprocess.run(['ssh', '-o', 'StrictHostKeyChecking=no', '-i', '~/.ssh/xnet', 'xnet@' + ip,\n 'source ~/.profile; 
./script/install_config_machine.py'])\n return\n",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Project modules
from signalManipulation import *
from manipulateData import *
# Third-party and standard-library modules
import pickle
from sklearn import svm, grid_search
from sklearn.linear_model import ElasticNetCV, ElasticNet, RidgeClassifier
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, roc_auc_score
from sklearn.preprocessing import scale
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import StratifiedKFold
from copy import copy,deepcopy
import pylab as pl
#======================== TOOLS ========================
#======================================================
def writeResults(results, best_params, best_score, modelType, penalty, scoreType,\
transformedData, scores=None):
"""
Write results of a grid_search in a file
[parameters] [score] [STD]
...
[Confusion Matrix of the best model on train]
[Confusion Matrix of the best model on test]
Best Params : XXXX Score CV : XXX%
Accuracy Train : XX Accuracy Test : XX
F1 Train : XX F1 Test : XX
Ex :
1.3 0.91
1.7 0.65
[[9787 4]
[ 399 520]]
[[6690 276]
[ 598 30]]
Best Params : 1.3 Score CV : 0.91
Accuracy Train : 0.91 Accuracy Test : 0.80
F1 Train : 0.80 F1 Test : 0.50
"""
strScores = ""
if modelType=='NonLinear':
for model in results:
print(model)
strScores += "{:.4} {} {} {}\n".format(model[0]['C'], model[0]['gamma'], model[1], np.std(model[2]))
elif modelType=='ElasticNet':
for model in results:
print(model)
strScores += "{:.4} {} {} {}\n".format(model[0]['alpha'], model[0]['l1_ratio'], model[1], np.std(model[2]))
elif modelType=='Pipe':
for model in results:
print(model)
if 'classif__C' in model[0].keys():
strScores += "{} {:.4} {} {}\n".format(model[0]['csp__n_components'], model[0]['classif__C'], model[1], np.std(model[2]))
else:
strScores += "{} {:.4} {} {}\n".format(model[0]['csp__n_components'], model[0]['classif__alpha'], model[1], np.std(model[2]))
elif modelType=='Ridge':
for model in results:
print(model)
strScores += "{:.4} {} {}\n".format(model[0]['alpha'], model[1], np.std(model[2]))
else: #Linear, C is the only parameter
for model in results:
print(model)
strScores += "{:.4} {} {}\n".format(model[0]['C'], model[1], np.std(model[2]))
strScores += "Best Params : {} Score CrossVal : {} \n".format(best_params, best_score)
if scores:
strScores += "{}\n{}\n".format(str(scores['cMatrixTrain']),\
str(scores['cMatrixTest']))
strScores += "Accuracy Train : {} Accuracy Test : {} \n".format(scores['accTrain'], scores['accTest'])
strScores += "F1 Train : {} F1 Test : {} \n".format(scores['f1Train'],\
scores['f1Test'])
strScores += "Roc_Auc Train : {} Roc_Auc Test : {} \n".format(scores['rocTrain'],scores['rocTest'])
else:
print("No Test file")
strScores += "\nNo Test file\n=========\n"
f = open("{}{}HyperSelection{}{}{}.txt".format(RESULTS_PATH, penalty, modelType.title(), scoreType.title(), transformedData.title()), 'w')
f.write(strScores)
f.close()
def getScores(y, yPredTrain, yTest, yPredTest):
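    """Compute F1, accuracy, ROC-AUC and confusion matrices on train and test, plus the majority-class baseline."""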
scores = dict()
scores['f1Train'] = f1_score(y, yPredTrain)
scores['f1Test'] = f1_score(yTest, yPredTest)
scores['accTrain'] = accuracy_score(y, yPredTrain)
scores['accTest'] = accuracy_score(yTest, yPredTest)
scores['rocTrain'] = roc_auc_score(y, yPredTrain)
scores['rocTest'] = roc_auc_score(yTest, yPredTest)
scores['cMatrixTrain'] = confusion_matrix(y, yPredTrain)
scores['cMatrixTest'] = confusion_matrix(yTest, yPredTest)
proba = float(len(np.where(y==1)[0]))/len(y)
if proba < 0.50:
proba = 1 - proba
scores['random'] = proba
return scores
def printScores(scores):
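    """Format the scores dictionary as a readable report, print it and return the string."""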
strSave = "Train :\n"
strSave += "Accuracy : {}\n".format(scores['accTrain'])
strSave += "Roc_Auc : {}\n".format(scores['rocTrain'])
strSave += "F1 : {}\n".format(scores['f1Train'])
strSave += "{}\n".format(scores['cMatrixTrain'])
strSave += "Test :\n"
strSave += "Accuracy : {}\n".format(scores['accTest'])
strSave += "Roc_Auc : {}\n".format(scores['rocTest'])
strSave += "F1 : {}\n".format(scores['f1Test'])
strSave += "{}\n".format(scores['cMatrixTest'])
strSave += "Random Accuracy : {}".format(scores['random'])
    print(strSave)
return strSave
def testModel(best,X,y,xTest,yTest,penalty):
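    """Predict on train and test with the fitted model and print the scores; with an l1 penalty, also save and plot the non-zero coefficients."""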
print("Predicting Data :")
yPredTrain = best.predict(X)
yPredTest = best.predict(xTest)
scores = getScores(y, yPredTrain, yTest, yPredTest)
printScores(scores)
if penalty=='l1':
saveNonZerosCoef(best, 'l1', dataType=transformedData)
analyzeCoef(dataType=transformedData, reg='l1')
return scores
def saveNonZerosCoef(clf, reg, dataType):
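    """Write the indices of the classifier's non-zero coefficients to a file, then plot their distribution."""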
nonZerosParams = np.where(clf.coef_ != 0)[0]
print("Nombre de coef : ", len(clf.coef_[0]))
print("Nombre de coef annulés : ", len(nonZerosParams))
with open('nonZerosParams{}{}'.format(dataType.title(),reg), 'w') as f:
f.write(str(list(nonZerosParams)))
analyzeCoef(dataType, reg)
def analyzeCoef(dataType, reg):
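    """Plot how the non-zero coefficients are distributed across time steps and electrodes."""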
path = "Images/Screenshots/"
with open('nonZerosParams{}{}'.format(dataType.title(),reg), 'r') as f:
wholeFile = f.read()
print("Here")
print(wholeFile[0], wholeFile[-1])
wholeFile = wholeFile[1:-1]
numGen = map(int,wholeFile.split(','))
#Step
step = np.zeros(40)
steps = np.array([i+1 for i in range(40)])
for num in numGen:
step[num%40] += 1
numGen = map(int,wholeFile.split(','))
#Elec
elec = np.zeros(64)
elecs = np.array([i+1 for i in range(64)])
for num in numGen:
elec[num//40] += 1
ax = plt.subplot()
steps = np.array(steps)/60
ax.bar(steps, step, width=1/60)
ax.set_title("Nombre de coefficients non annulés par pas de temps")
plt.savefig(path+'nonZerosStep{}{}.png'.format(dataType.title(),reg))
plt.show()
ax = plt.subplot()
ax.bar(elecs, elec, width=1)
ax.set_title("Nombre de coefficients non annulés par electrode")
plt.savefig(path+'nonZerosElec{}{}.png'.format(dataType.title(),reg))
plt.show()
#=============== Learner =============================
#====================================================
def learnHyperLinear(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):
"""
Grid Search over a set of parameters for linear model
"""
#Check if test is empty, if it is, don't refit and predict data
testAvailable = np.size(xTest,0)!=0
# Parameters selection
#====================
cRange = np.logspace(-5,1,3)
parameters = {'C': cRange}
if penalty=='l1':
dual=False
else:
dual=True
#Creating Model and begin classification
#=======================================
classif = svm.LinearSVC(penalty=penalty, class_weight=CLASS_WEIGHT, dual=dual)
clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3, refit=testAvailable)
print("Begin\n...")
clf.fit(X,y)
#Get results, print and write them into a file
#============================================
print(clf.best_params_, clf.best_score_)
if testAvailable:
scores = testModel(clf.best_estimator_,X,y,xTest,yTest,penalty)
writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\
penalty,scoring, transformedData, scores=scores)
else:
print("No test, don't predict data")
writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\
penalty,scoring, transformedData, scores=None)
def learnHyperNonLinear(X, y, xTest, yTest, scoring, transformedData,jobs=1):
"""
Grid Search over a set of parameters for a non-linear model
"""
#Check if test is empty, if it is, don't refit and predict data
testAvailable = np.size(xTest,0)!=0
# Parameters selection
#====================
cRange = np.logspace(-5,2,8)
gRange = np.logspace(-5,2,8)
parameters = {'C': cRange, 'gamma':gRange}
#Creating Model and begin classification
#=======================================
classif = svm.SVC(class_weight=CLASS_WEIGHT)
clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs,verbose=3,refit=testAvailable)
print("Begin\n...")
clf.fit(X,y)
#Get results, print and write them into a file
#============================================
print(clf.best_params_, clf.best_score_)
if testAvailable:
scores = testModel(clf.best_estimator_,X,y,xTest,yTest,'l2')
writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\
'NonLinear', 'l2', scoring, transformedData, scores=scores)
else:
print("No test, don't predict data")
writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\
'NonLinear', 'l2', scoring, transformedData, scores=None)
def learnRidge(X,y,xTest,yTest,scoring, transformedData, jobs):
"""
Grid Search over a set of parameters for linear model
"""
#Check if test is empty, if it is, don't refit and predict data
testAvailable = np.size(xTest,0)!=0
# Parameters selection
#====================
alpha = np.logspace(-3,3,6)
parameters = {'alpha': alpha}
#Creating Model and begin classification
#=======================================
classif = RidgeClassifier(class_weight=CLASS_WEIGHT)
clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=10, n_jobs=jobs, verbose=3, refit=testAvailable)
print("Begin\n...")
clf.fit(X,y)
#Get results, print and write them into a file
#============================================
print(clf.best_params_, clf.best_score_)
if testAvailable:
scores = testModel(clf.best_estimator_,X,y,xTest,yTest,'l2')
writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Ridge',\
'l2',scoring, transformedData, scores=scores)
else:
print("No test, don't predict data")
writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Ridge',\
'l2',scoring, transformedData, scores=None)
def learnRandomForest(X,y,xTest,yTest,scoring, jobs):
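    """Grid search over random forest hyper-parameters, then print train/test scores of the best model."""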
params = {
'n_estimators':[2,10,100],
'max_features':['auto',2,10],
'max_depth':[10,40,2],
'min_samples_split':[2,10,20,50]
}
forest = RandomForestClassifier()
grd = grid_search.GridSearchCV(forest,params, scoring=scoring,cv=3,n_jobs=jobs,verbose=3)
grd.fit(X,y)
yPredTrain = grd.predict(X)
yPredTest = grd.predict(xTest)
print "FOREST : \n"
scores = getScores(y, yPredTrain, yTest, yPredTest)
printScores(scores)
def learnCspPipeline(X, y, xTest, yTest, scoring,transformedData,jobs=1, classifier='lin'):
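    """Grid search over a CSP + linear classifier pipeline (number of CSP components and regularization strength)."""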
testAvailable = np.size(xTest)
X = vecToMat(X)
if testAvailable:
xTest = vecToMat(xTest)
if classifier=='lin':
classif = svm.LinearSVC(penalty='l2',class_weight=CLASS_WEIGHT)
params = np.logspace(-5,1,3)
hyper = 'classif__C'
else:
classif = RidgeClassifier(class_weight=CLASS_WEIGHT)
params = np.logspace(-1,3,10)
hyper = 'classif__alpha'
csp = CSP(reg='ledoit_wolf',log=False)
scaler = StandardScaler()
    # NOTE: the first pipeline (with StandardScaler) is immediately overwritten below and is never used
    pipe = Pipeline(steps = [('csp',csp), ('scaler',scaler), ('classif',classif)])
    pipe = Pipeline(steps = [('csp',csp), ('classif',classif)])
n_components = [1,2,5,10,20,30,40,50]
dico = {'csp__n_components':n_components, hyper:params}
grd = grid_search.GridSearchCV(pipe,dico, cv=5, verbose=3, n_jobs=4)
grd.fit(X,y)
if testAvailable:
scores = testModel(grd.best_estimator_,X,y,xTest,yTest,'l2')
writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=scores)
else:
print("No test, don't predict data")
writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=None)
def learnElasticNet(X,y,xTest,yTest,scoring,transformedData='raw',jobs=1):
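    """Grid search over ElasticNet alpha and l1_ratio; regression outputs are thresholded at 0 to recover class labels."""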
# Parameters selection
#====================
alpha = np.linspace(0.01,0.2,5)
l1_ratio = np.linspace(0.01,0.3,5)
parameters = {'alpha': alpha, 'l1_ratio': l1_ratio}
#Creating Model and begin classification
#=======================================
classif = ElasticNet(selection='random')
clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs,verbose=3)
print("Begin\n...")
clf.fit(X,y)
#Get results, print and write them into a file
#============================================
best = clf.best_estimator_
print(clf.best_params_, clf.best_score_)
    if np.size(xTest,0)!=0:
print("Predicting Data :")
yPredTrain = best.predict(X)
yPredTrain[yPredTrain >= 0] = 1
yPredTrain[yPredTrain < 0] = -1
yPredTest = best.predict(xTest)
yPredTest[yPredTest >= 0] = 1
yPredTest[yPredTest < 0] = -1
scores = getScores(y, yPredTrain, yTest, yPredTest)
printScores(scores)
writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\
'ElasticNet', 'l1l2', scoring, transformedData, scores)
nonZerosParams = np.where(best.coef_ != 0)[0]
print(len(nonZerosParams))
print(nonZerosParams)
with open('nonZerosParamsRawElasticNet', 'w') as f:
f.write(str(list(nonZerosParams)))
def learnStep(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):
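    """Greedy backward elimination of time steps: repeatedly drop the step whose removal gives the best test score, stopping after several removals without improvement."""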
baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)
cRange = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1, 10]
parameters = {'C': cRange}
best_score = 0
numStep = np.size(X,1)//64
keptStep = np.ones(numStep, dtype=bool)
copyX = copy(X)
copyXTest = copy(xTest)
scores = np.zeros(numStep)
scoreDecrease = False
numFailed = 0
while not scoreDecrease:
scores[:] = 0
for step in range(numStep):
if not keptStep[step] :
continue
else:
erased = list(np.where(keptStep==False)[0])
if erased != []:
erased.append(step)
X = delTimeStep(X, erased, transformedData)
xTest = delTimeStep(xTest, erased, transformedData)
else:
X = delTimeStep(X,step, transformedData)
xTest = delTimeStep(xTest, step, transformedData)
print("Learning Model without step N°",step)
clf = grid_search.GridSearchCV(baseClf, parameters, scoring=scoring,\
cv=5, n_jobs=jobs, verbose=3)
clf.fit(X,y)
best = clf.best_estimator_
print(clf.best_params_, clf.best_score_)
yPredTest = best.predict(xTest)
if scoring=='f1':
scores[step] = f1_score(yTest, yPredTest)
else:
scores[step] = roc_auc_score(yTest, yPredTest)
print("Score :", scores[step])
#post process :
X = copy(copyX)
xTest = copy(copyXTest)
worstStep = np.argmax(scores)
keptStep[worstStep] = False
print("Score max : {}, removing step N°{}".format(scores[worstStep], worstStep))
print("Step removed : ", np.where(keptStep==False))
print("Past Best : ", best_score)
if scores[worstStep] > best_score:
best_score = scores[worstStep]
else:
numFailed += 1
if numFailed > 3:
scoreDecrease = True
def learnElecFaster(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):
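    """Greedy backward elimination of electrodes: with a fixed C, repeatedly drop the electrode whose removal gives the best cross-validation score."""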
baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)
cRange = np.logspace(-5,2,8)
parameters = {'C': cRange}
if np.size(xTest)!=0:
X = np.concatenate((X,xTest))
y = np.concatenate((y,yTest))
# clf = grid_search.GridSearchCV(baseClf, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3)
# clf.fit(X,y)
# bestParams = clf.best_params_
# print(bestParams['C'], clf.best_score_)
# C = bestParams['C']
C = 1e-5
baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)
best_score = 0
best_selection = []
keptElec = np.ones(64, dtype=bool)
copyX = copy(X)
scores = np.zeros(64)
scoreDecrease = False
numFailed = 0
for numIter in range(63):
scores[:] = 0
for elec in range(64):
if not keptElec[elec] :
#Already deleted
continue
else:
print("Deleting Electrode(s) ...")
erased = list(np.where(keptElec==False)[0])
if erased != []:
erased.append(elec)
X = delElec(X, erased, transformedData)
else:
X = delElec(X,elec, transformedData)
print("Learning Model without elec N°",elec)
clf = grid_search.GridSearchCV(baseClf, {'C':[C]}, scoring=scoring, cv=10, n_jobs=jobs, verbose=1)
clf.fit(X,y)
scores[elec] = clf.best_score_
print(scores[elec])
#post process :
X = copy(copyX)
worstElec = np.argmax(scores)
keptElec[worstElec] = False
removedElec = np.where(keptElec==False)
print("Score max : {}, removing elec N°{}".format(scores[worstElec], worstElec))
print("Elec removed : ", removedElec)
print("Past Best : ", best_score, "with : ", best_selection)
if scores[worstElec] > best_score:
best_score = scores[worstElec]
best_selection = np.where(keptElec==False)
else:
numFailed += 1
with open("selecStep.txt",'a') as f:
f.write("{} : {} with elec {}, numFailed : {}\n".format(numIter, scores[worstElec], removedElec, numFailed))
|
normal
|
{
"blob_id": "d8e8ecbf77828e875082abf8dcbfbc2c29564e20",
"index": 4892,
"step-1": "#!/usr/bin/env python\n# -*- coding: utf-8 -*\n#Perso\nfrom signalManipulation import *\nfrom manipulateData import *\n\n#Module\nimport pickle\n\nfrom sklearn import svm, grid_search\nfrom sklearn.linear_model import ElasticNetCV, ElasticNet, RidgeClassifier\nfrom sklearn.metrics import confusion_matrix, f1_score, accuracy_score, roc_auc_score\nfrom sklearn.preprocessing import scale\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn.cross_validation import StratifiedKFold\n\nfrom copy import copy,deepcopy\n\nimport pylab as pl\n\n#======================== TOOLS ========================\n#======================================================\ndef writeResults(results, best_params, best_score, modelType, penalty, scoreType,\\\n transformedData, scores=None):\n \"\"\"\n Write results of a grid_search in a file\n [parameters] [score] [STD]\n ...\n [Confusion Matrix of the best model on train]\n [Confusion Matrix of the best model on test]\n Best Params : XXXX Score CV : XXX%\n Accuracy Train : XX Accuracy Test : XX\n F1 Train : XX F1 Test : XX\n\n Ex :\n\n 1.3 0.91\n 1.7 0.65\n [[9787 4]\n [ 399 520]]\n [[6690 276]\n [ 598 30]]\n Best Params : 1.3 Score CV : 0.91\n Accuracy Train : 0.91 Accuracy Test : 0.80\n F1 Train : 0.80 F1 Test : 0.50\n \"\"\"\n\n strScores = \"\"\n\n if modelType=='NonLinear':\n for model in results:\n print(model)\n strScores += \"{:.4} {} {} {}\\n\".format(model[0]['C'], model[0]['gamma'], model[1], np.std(model[2]))\n elif modelType=='ElasticNet':\n for model in results:\n print(model)\n strScores += \"{:.4} {} {} {}\\n\".format(model[0]['alpha'], model[0]['l1_ratio'], model[1], np.std(model[2]))\n\n elif modelType=='Pipe':\n for model in results:\n print(model)\n if 'classif__C' in model[0].keys():\n strScores += \"{} {:.4} {} {}\\n\".format(model[0]['csp__n_components'], model[0]['classif__C'], model[1], np.std(model[2]))\n else:\n strScores += \"{} {:.4} {} {}\\n\".format(model[0]['csp__n_components'], model[0]['classif__alpha'], model[1], np.std(model[2]))\n\n elif modelType=='Ridge':\n for model in results:\n print(model)\n strScores += \"{:.4} {} {}\\n\".format(model[0]['alpha'], model[1], np.std(model[2]))\n\n \n else: #Linear, C is the only parameter\n for model in results:\n print(model)\n strScores += \"{:.4} {} {}\\n\".format(model[0]['C'], model[1], np.std(model[2]))\n \n\n strScores += \"Best Params : {} Score CrossVal : {} \\n\".format(best_params, best_score)\n\n if scores:\n strScores += \"{}\\n{}\\n\".format(str(scores['cMatrixTrain']),\\\n str(scores['cMatrixTest']))\n\n strScores += \"Accuracy Train : {} Accuracy Test : {} \\n\".format(scores['accTrain'], scores['accTest'])\n strScores += \"F1 Train : {} F1 Test : {} \\n\".format(scores['f1Train'],\\\n scores['f1Test'])\n strScores += \"Roc_Auc Train : {} Roc_Auc Test : {} \\n\".format(scores['rocTrain'],scores['rocTest'])\n else:\n print(\"No Test file\")\n strScores += \"\\nNo Test file\\n=========\\n\"\n \n f = open(\"{}{}HyperSelection{}{}{}.txt\".format(RESULTS_PATH, penalty, modelType.title(), scoreType.title(), transformedData.title()), 'w')\n f.write(strScores)\n f.close()\n\ndef getScores(y, yPredTrain, yTest, yPredTest):\n\n scores = dict()\n\n scores['f1Train'] = f1_score(y, yPredTrain)\n scores['f1Test'] = f1_score(yTest, yPredTest)\n\n\n scores['accTrain'] = accuracy_score(y, yPredTrain)\n scores['accTest'] = accuracy_score(yTest, yPredTest)\n \n\n scores['rocTrain'] = roc_auc_score(y, yPredTrain)\n scores['rocTest'] 
= roc_auc_score(yTest, yPredTest)\n \n\n scores['cMatrixTrain'] = confusion_matrix(y, yPredTrain)\n scores['cMatrixTest'] = confusion_matrix(yTest, yPredTest)\n\n proba = float(len(np.where(y==1)[0]))/len(y)\n if proba < 0.50:\n proba = 1 - proba\n scores['random'] = proba\n \n return scores\n\ndef printScores(scores):\n\n strSave = \"Train :\\n\"\n strSave += \"Accuracy : {}\\n\".format(scores['accTrain'])\n strSave += \"Roc_Auc : {}\\n\".format(scores['rocTrain'])\n strSave += \"F1 : {}\\n\".format(scores['f1Train'])\n strSave += \"{}\\n\".format(scores['cMatrixTrain'])\n\n strSave += \"Test :\\n\"\n strSave += \"Accuracy : {}\\n\".format(scores['accTest'])\n strSave += \"Roc_Auc : {}\\n\".format(scores['rocTest'])\n strSave += \"F1 : {}\\n\".format(scores['f1Test'])\n strSave += \"{}\\n\".format(scores['cMatrixTest'])\n\n strSave += \"Random Accuracy : {}\".format(scores['random'])\n\n \n print strSave\n return strSave\n\n\ndef testModel(best,X,y,xTest,yTest,penalty):\n \n print(\"Predicting Data :\")\n yPredTrain = best.predict(X)\n yPredTest = best.predict(xTest)\n scores = getScores(y, yPredTrain, yTest, yPredTest)\n printScores(scores)\n\n if penalty=='l1':\n saveNonZerosCoef(best, 'l1', dataType=transformedData)\n analyzeCoef(dataType=transformedData, reg='l1')\n\n return scores\n\n\ndef saveNonZerosCoef(clf, reg, dataType):\n\n nonZerosParams = np.where(clf.coef_ != 0)[0]\n print(\"Nombre de coef : \", len(clf.coef_[0]))\n print(\"Nombre de coef annulés : \", len(nonZerosParams))\n\n with open('nonZerosParams{}{}'.format(dataType.title(),reg), 'w') as f:\n f.write(str(list(nonZerosParams)))\n\n analyzeCoef(dataType, reg)\n\n\ndef analyzeCoef(dataType, reg):\n\n path = \"Images/Screenshots/\"\n \n with open('nonZerosParams{}{}'.format(dataType.title(),reg), 'r') as f:\n wholeFile = f.read()\n print(\"Here\")\n print(wholeFile[0], wholeFile[-1])\n wholeFile = wholeFile[1:-1]\n numGen = map(int,wholeFile.split(','))\n\n #Step\n step = np.zeros(40)\n steps = np.array([i+1 for i in range(40)])\n for num in numGen:\n step[num%40] += 1\n\n numGen = map(int,wholeFile.split(','))\n\n #Elec\n elec = np.zeros(64)\n elecs = np.array([i+1 for i in range(64)])\n\n for num in numGen:\n elec[num//40] += 1\n\n ax = plt.subplot()\n\n steps = np.array(steps)/60\n \n ax.bar(steps, step, width=1/60)\n ax.set_title(\"Nombre de coefficients non annulés par pas de temps\")\n plt.savefig(path+'nonZerosStep{}{}.png'.format(dataType.title(),reg))\n\n plt.show()\n \n ax = plt.subplot()\n ax.bar(elecs, elec, width=1)\n ax.set_title(\"Nombre de coefficients non annulés par electrode\")\n plt.savefig(path+'nonZerosElec{}{}.png'.format(dataType.title(),reg))\n plt.show()\n\n#=============== Learner =============================\n#====================================================\ndef learnHyperLinear(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n \"\"\"\n Grid Search over a set of parameters for linear model\n \"\"\"\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n\n # Parameters selection\n #====================\n cRange = np.logspace(-5,1,3)\n parameters = {'C': cRange}\n\n if penalty=='l1':\n dual=False\n else:\n dual=True\n\n #Creating Model and begin classification\n #=======================================\n classif = svm.LinearSVC(penalty=penalty, class_weight=CLASS_WEIGHT, dual=dual)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3, refit=testAvailable)\n 
print(\"Begin\\n...\")\n clf.fit(X,y)\n\n \n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n\n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,penalty)\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=scores)\n else:\n print(\"No test, don't predict data\")\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Linear',\\\n penalty,scoring, transformedData, scores=None)\n \n\n\ndef learnHyperNonLinear(X, y, xTest, yTest, scoring, transformedData,jobs=1):\n \"\"\"\n Grid Search over a set of parameters for a non-linear model\n \"\"\"\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n \n\n # Parameters selection\n #====================\n cRange = np.logspace(-5,2,8)\n gRange = np.logspace(-5,2,8)\n parameters = {'C': cRange, 'gamma':gRange}\n \n #Creating Model and begin classification\n #=======================================\n classif = svm.SVC(class_weight=CLASS_WEIGHT)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs,verbose=3,refit=testAvailable)\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n \n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,'l2')\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\\\n 'NonLinear', 'l2', scoring, transformedData, scores=scores)\n\n \n else:\n print(\"No test, don't predict data\")\n \n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\\\n 'NonLinear', 'l2', scoring, transformedData, scores=None)\n\ndef learnRidge(X,y,xTest,yTest,scoring, transformedData, jobs):\n \"\"\"\n Grid Search over a set of parameters for linear model\n \"\"\"\n #Check if test is empty, if it is, don't refit and predict data\n testAvailable = np.size(xTest,0)!=0\n\n # Parameters selection\n #====================\n alpha = np.logspace(-3,3,6)\n parameters = {'alpha': alpha}\n\n #Creating Model and begin classification\n #=======================================\n classif = RidgeClassifier(class_weight=CLASS_WEIGHT)\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=10, n_jobs=jobs, verbose=3, refit=testAvailable)\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n #Get results, print and write them into a file\n #============================================\n print(clf.best_params_, clf.best_score_)\n\n if testAvailable:\n scores = testModel(clf.best_estimator_,X,y,xTest,yTest,'l2')\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Ridge',\\\n 'l2',scoring, transformedData, scores=scores)\n else:\n print(\"No test, don't predict data\")\n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,'Ridge',\\\n 'l2',scoring, transformedData, scores=None)\n \n\ndef learnRandomForest(X,y,xTest,yTest,scoring, jobs):\n\n params = {\n 'n_estimators':[2,10,100],\n 'max_features':['auto',2,10],\n 'max_depth':[10,40,2],\n 'min_samples_split':[2,10,20,50]\n }\n \n forest = RandomForestClassifier()\n\n grd = grid_search.GridSearchCV(forest,params, scoring=scoring,cv=3,n_jobs=jobs,verbose=3)\n grd.fit(X,y)\n\n yPredTrain = grd.predict(X)\n yPredTest = grd.predict(xTest)\n\n print \"FOREST : \\n\"\n scores = getScores(y, yPredTrain, yTest, yPredTest)\n 
printScores(scores)\n\n\ndef learnCspPipeline(X, y, xTest, yTest, scoring,transformedData,jobs=1, classifier='lin'):\n\n testAvailable = np.size(xTest)\n \n X = vecToMat(X)\n\n if testAvailable:\n xTest = vecToMat(xTest)\n\n if classifier=='lin':\n classif = svm.LinearSVC(penalty='l2',class_weight=CLASS_WEIGHT)\n params = np.logspace(-5,1,3)\n hyper = 'classif__C'\n\n else:\n classif = RidgeClassifier(class_weight=CLASS_WEIGHT)\n params = np.logspace(-1,3,10)\n hyper = 'classif__alpha'\n\n csp = CSP(reg='ledoit_wolf',log=False)\n scaler = StandardScaler()\n pipe = Pipeline(steps = [('csp',csp), ('scaler',scaler), ('classif',classif)])\n pipe = Pipeline(steps = [('csp',csp), ('classif',classif)])\n\n n_components = [1,2,5,10,20,30,40,50]\n dico = {'csp__n_components':n_components, hyper:params}\n\n grd = grid_search.GridSearchCV(pipe,dico, cv=5, verbose=3, n_jobs=4)\n grd.fit(X,y)\n\n \n if testAvailable:\n scores = testModel(grd.best_estimator_,X,y,xTest,yTest,'l2')\n writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=scores)\n\n else:\n print(\"No test, don't predict data\") \n writeResults(grd.grid_scores_, grd.best_params_, grd.best_score_,'Pipe', 'l2', scoring, transformedData, scores=None)\n\n\n \ndef learnElasticNet(X,y,xTest,yTest,scoring,transformedData='raw',jobs=1):\n\n # Parameters selection\n #====================\n alpha = np.linspace(0.01,0.2,5)\n l1_ratio = np.linspace(0.01,0.3,5)\n parameters = {'alpha': alpha, 'l1_ratio': l1_ratio}\n \n #Creating Model and begin classification\n #=======================================\n classif = ElasticNet(selection='random')\n clf = grid_search.GridSearchCV(classif, parameters, scoring=scoring, cv=5, n_jobs=jobs,verbose=3)\n\n print(\"Begin\\n...\")\n clf.fit(X,y)\n\n #Get results, print and write them into a file\n #============================================\n best = clf.best_estimator_\n print(clf.best_params_, clf.best_score_)\n\n if np.size(a,0)!=0:\n print(\"Predicting Data :\")\n yPredTrain = best.predict(X)\n yPredTrain[yPredTrain >= 0] = 1\n yPredTrain[yPredTrain < 0] = -1\n\n yPredTest = best.predict(xTest)\n yPredTest[yPredTest >= 0] = 1\n yPredTest[yPredTest < 0] = -1\n\n scores = getScores(y, yPredTrain, yTest, yPredTest)\n printScores(scores)\n \n writeResults(clf.grid_scores_, clf.best_params_, clf.best_score_,\\\n 'ElasticNet', 'l1l2', scoring, transformedData, scores)\n \n nonZerosParams = np.where(best.coef_ != 0)[0]\n print(len(nonZerosParams))\n print(nonZerosParams)\n\n with open('nonZerosParamsRawElasticNet', 'w') as f:\n f.write(str(list(nonZerosParams)))\n\ndef learnStep(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n\n baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)\n cRange = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1, 10]\n parameters = {'C': cRange}\n\n best_score = 0\n numStep = np.size(X,1)//64\n keptStep = np.ones(numStep, dtype=bool)\n copyX = copy(X)\n copyXTest = copy(xTest)\n\n scores = np.zeros(numStep)\n scoreDecrease = False\n numFailed = 0\n \n while not scoreDecrease:\n\n scores[:] = 0\n\n for step in range(numStep):\n if not keptStep[step] :\n continue\n else:\n erased = list(np.where(keptStep==False)[0])\n \n if erased != []:\n erased.append(step)\n X = delTimeStep(X, erased, transformedData)\n xTest = delTimeStep(xTest, erased, transformedData)\n else:\n X = delTimeStep(X,step, transformedData)\n xTest = delTimeStep(xTest, step, transformedData)\n\n print(\"Learning Model without step N°\",step)\n\n clf = 
grid_search.GridSearchCV(baseClf, parameters, scoring=scoring,\\\n cv=5, n_jobs=jobs, verbose=3)\n clf.fit(X,y)\n\n best = clf.best_estimator_\n print(clf.best_params_, clf.best_score_)\n\n yPredTest = best.predict(xTest)\n\n\n if scoring=='f1':\n scores[step] = f1_score(yTest, yPredTest)\n else:\n scores[step] = roc_auc_score(yTest, yPredTest)\n\n\n print(\"Score :\", scores[step])\n\n #post process :\n X = copy(copyX)\n xTest = copy(copyXTest)\n \n worstStep = np.argmax(scores)\n keptStep[worstStep] = False\n\n print(\"Score max : {}, removing step N°{}\".format(scores[worstStep], worstStep))\n print(\"Step removed : \", np.where(keptStep==False))\n print(\"Past Best : \", best_score)\n\n if scores[worstStep] > best_score:\n best_score = scores[worstStep]\n else:\n numFailed += 1\n \n if numFailed > 3:\n scoreDecrease = True\n\ndef learnElecFaster(X, y, xTest, yTest, penalty, scoring, transformedData,jobs=1):\n \n baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)\n cRange = np.logspace(-5,2,8)\n \n parameters = {'C': cRange}\n\n if np.size(xTest)!=0:\n X = np.concatenate((X,xTest))\n y = np.concatenate((y,yTest))\n \n # clf = grid_search.GridSearchCV(baseClf, parameters, scoring=scoring, cv=5, n_jobs=jobs, verbose=3)\n # clf.fit(X,y)\n # bestParams = clf.best_params_\n # print(bestParams['C'], clf.best_score_)\n\n # C = bestParams['C']\n C = 1e-5\n baseClf = svm.LinearSVC(penalty='l2', class_weight=CLASS_WEIGHT)\n\n best_score = 0\n best_selection = []\n keptElec = np.ones(64, dtype=bool)\n\n copyX = copy(X)\n \n scores = np.zeros(64)\n scoreDecrease = False\n numFailed = 0\n \n for numIter in range(63):\n\n scores[:] = 0\n\n for elec in range(64):\n if not keptElec[elec] :\n #Already deleted\n continue\n else:\n\n print(\"Deleting Electrode(s) ...\")\n erased = list(np.where(keptElec==False)[0]) \n if erased != []:\n erased.append(elec)\n X = delElec(X, erased, transformedData)\n else:\n X = delElec(X,elec, transformedData)\n\n print(\"Learning Model without elec N°\",elec)\n\n clf = grid_search.GridSearchCV(baseClf, {'C':[C]}, scoring=scoring, cv=10, n_jobs=jobs, verbose=1)\n clf.fit(X,y)\n \n scores[elec] = clf.best_score_\n\n print(scores[elec])\n \n #post process :\n X = copy(copyX)\n \n worstElec = np.argmax(scores)\n keptElec[worstElec] = False\n removedElec = np.where(keptElec==False)\n print(\"Score max : {}, removing elec N°{}\".format(scores[worstElec], worstElec))\n print(\"Elec removed : \", removedElec)\n \n print(\"Past Best : \", best_score, \"with : \", best_selection)\n\n if scores[worstElec] > best_score:\n best_score = scores[worstElec]\n best_selection = np.where(keptElec==False)\n\n else:\n numFailed += 1\n\n with open(\"selecStep.txt\",'a') as f:\n f.write(\"{} : {} with elec {}, numFailed : {}\\n\".format(numIter, scores[worstElec], removedElec, numFailed))\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 17:38:50 2019
@author: User
"""
import numpy as np
import pandas as pd
dataset = pd.read_csv('University_data.csv')
print(dataset.info())
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, -1:].values
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
features[:, 0] = labelencoder.fit_transform(features[:, 0])
from sklearn.preprocessing import OneHotEncoder
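# note: the categorical_features argument below only works on older scikit-learn releases
# (it was deprecated in 0.20 and later removed); newer code would use a ColumnTransformer
# for the same one-hot step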
onehotencoder = OneHotEncoder(categorical_features = [0])
features = onehotencoder.fit_transform(features).toarray()
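# dropping the first dummy column below avoids the dummy-variable trap (that column is redundant)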
features = features[:, 1:]
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(features, labels)
x = ["Cabrini",337,1.5,2.3,9.0,0]
x = np.array(x).reshape(1,-1)
x[:,0] = labelencoder.transform(x[:,0])
x = onehotencoder.transform(x).toarray()
x = x[:,1:]
regressor.predict(x)
|
normal
|
{
"blob_id": "94e8f0532da76c803b23fe2217b07dc8cf285710",
"index": 950,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(dataset.info())\n<mask token>\nregressor.fit(features, labels)\n<mask token>\nregressor.predict(x)\n",
"step-3": "<mask token>\ndataset = pd.read_csv('University_data.csv')\nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values\nlabels = dataset.iloc[:, -1:].values\n<mask token>\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\n<mask token>\nonehotencoder = OneHotEncoder(categorical_features=[0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\n<mask token>\nregressor = LinearRegression()\nregressor.fit(features, labels)\nx = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]\nx = np.array(x).reshape(1, -1)\nx[:, 0] = labelencoder.transform(x[:, 0])\nx = onehotencoder.transform(x).toarray()\nx = x[:, 1:]\nregressor.predict(x)\n",
"step-4": "<mask token>\nimport numpy as np\nimport pandas as pd\ndataset = pd.read_csv('University_data.csv')\nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values\nlabels = dataset.iloc[:, -1:].values\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\nfrom sklearn.preprocessing import OneHotEncoder\nonehotencoder = OneHotEncoder(categorical_features=[0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(features, labels)\nx = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]\nx = np.array(x).reshape(1, -1)\nx[:, 0] = labelencoder.transform(x[:, 0])\nx = onehotencoder.transform(x).toarray()\nx = x[:, 1:]\nregressor.predict(x)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 27 17:38:50 2019\n\n@author: User\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\ndataset = pd.read_csv('University_data.csv') \nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values \nlabels = dataset.iloc[:, -1:].values \nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\nfrom sklearn.preprocessing import OneHotEncoder\nonehotencoder = OneHotEncoder(categorical_features = [0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\n\n\nfrom sklearn.linear_model import LinearRegression \nregressor = LinearRegression() \nregressor.fit(features, labels)\n\n\n\nx = [\"Cabrini\",337,1.5,2.3,9.0,0]\nx = np.array(x).reshape(1,-1)\nx[:,0] = labelencoder.transform(x[:,0])\nx = onehotencoder.transform(x).toarray()\nx = x[:,1:]\nregressor.predict(x)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#proper clarification for requirement is required
import boto3
s3_resource = boto3.resource('s3')
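# YOUR_BUCKET_NAME, first_bucket_name, first_file_name and second_bucket_name are placeholders to fill in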
s3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})
s3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name, Key=first_file_name)
s3_resource.Object(second_bucket_name, first_file_name).delete()
|
normal
|
{
"blob_id": "44097da54a0bb03ac14196712111a1489a956689",
"index": 5387,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ns3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME,\n CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})\ns3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name,\n Key=first_file_name)\ns3_resource.Object(second_bucket_name, first_file_name).delete()\n",
"step-3": "<mask token>\ns3_resource = boto3.resource('s3')\ns3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME,\n CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})\ns3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name,\n Key=first_file_name)\ns3_resource.Object(second_bucket_name, first_file_name).delete()\n",
"step-4": "import boto3\ns3_resource = boto3.resource('s3')\ns3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME,\n CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})\ns3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name,\n Key=first_file_name)\ns3_resource.Object(second_bucket_name, first_file_name).delete()\n",
"step-5": "#proper clarification for requirement is required\nimport boto3\ns3_resource = boto3.resource('s3')\ns3_resource.create_bucket(Bucket=YOUR_BUCKET_NAME, CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'})\ns3_resource.Bucket(first_bucket_name).upload_file(Filename=first_file_name, Key=first_file_name)\ns3_resource.Object(second_bucket_name, first_file_name).delete()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Flask
from threading import Timer
from crypto_crawler.const import BITCOIN_CRAWLING_PERIOD_SEC, COIN_MARKET_CAP_URL
from crypto_crawler.crawler import get_web_content, filter_invalid_records
app = Flask(__name__)
crawl_enabled = True
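# module-level flag; the /pause route clears it to stop the self-rescheduling crawl timer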
def crawl_bitcoin_price():
print("start crawling!")
bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)
bitcoin_prices = filter_invalid_records(bitcoin_prices)
# write_many(INSERT_CRYPTO_MANY, list(map(lambda x: x.to_tuple(), bitcoin_prices)))
# alarm_arbitrage(bitcoin_prices)
# alarm_prediction()
if crawl_enabled:
Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()
else:
print("crawl paused!")
return
# actual crawl
@app.route("/pause")
def pause():
global crawl_enabled
crawl_enabled = False
return "PAUSED!"
@app.route("/status")
def status():
return "100%"
@app.route("/")
def default():
return "SAMPLE TRADING SYSTEM"
if __name__ == "__main__":
crawl_bitcoin_price()
app.run()
|
normal
|
{
"blob_id": "ebbc6f9115e6b4ca7d1050a59cf175d123b6f3aa",
"index": 4871,
"step-1": "<mask token>\n\n\ndef crawl_bitcoin_price():\n print('start crawling!')\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print('crawl paused!')\n return\n\n\[email protected]('/pause')\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return 'PAUSED!'\n\n\n<mask token>\n\n\[email protected]('/')\ndef default():\n return 'SAMPLE TRADING SYSTEM'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef crawl_bitcoin_price():\n print('start crawling!')\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print('crawl paused!')\n return\n\n\[email protected]('/pause')\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return 'PAUSED!'\n\n\[email protected]('/status')\ndef status():\n return '100%'\n\n\[email protected]('/')\ndef default():\n return 'SAMPLE TRADING SYSTEM'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef crawl_bitcoin_price():\n print('start crawling!')\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print('crawl paused!')\n return\n\n\[email protected]('/pause')\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return 'PAUSED!'\n\n\[email protected]('/status')\ndef status():\n return '100%'\n\n\[email protected]('/')\ndef default():\n return 'SAMPLE TRADING SYSTEM'\n\n\nif __name__ == '__main__':\n crawl_bitcoin_price()\n app.run()\n",
"step-4": "<mask token>\napp = Flask(__name__)\ncrawl_enabled = True\n\n\ndef crawl_bitcoin_price():\n print('start crawling!')\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print('crawl paused!')\n return\n\n\[email protected]('/pause')\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return 'PAUSED!'\n\n\[email protected]('/status')\ndef status():\n return '100%'\n\n\[email protected]('/')\ndef default():\n return 'SAMPLE TRADING SYSTEM'\n\n\nif __name__ == '__main__':\n crawl_bitcoin_price()\n app.run()\n",
"step-5": "from flask import Flask\nfrom threading import Timer\n\nfrom crypto_crawler.const import BITCOIN_CRAWLING_PERIOD_SEC, COIN_MARKET_CAP_URL\nfrom crypto_crawler.crawler import get_web_content, filter_invalid_records\n\napp = Flask(__name__)\ncrawl_enabled = True\n\n\ndef crawl_bitcoin_price():\n print(\"start crawling!\")\n bitcoin_prices = get_web_content(COIN_MARKET_CAP_URL)\n bitcoin_prices = filter_invalid_records(bitcoin_prices)\n # write_many(INSERT_CRYPTO_MANY, list(map(lambda x: x.to_tuple(), bitcoin_prices)))\n # alarm_arbitrage(bitcoin_prices)\n # alarm_prediction()\n if crawl_enabled:\n Timer(BITCOIN_CRAWLING_PERIOD_SEC, crawl_bitcoin_price).start()\n else:\n print(\"crawl paused!\")\n return\n\n # actual crawl\n\n\[email protected](\"/pause\")\ndef pause():\n global crawl_enabled\n crawl_enabled = False\n return \"PAUSED!\"\n\n\[email protected](\"/status\")\ndef status():\n return \"100%\"\n\n\[email protected](\"/\")\ndef default():\n return \"SAMPLE TRADING SYSTEM\"\n\n\nif __name__ == \"__main__\":\n crawl_bitcoin_price()\n app.run()\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
from flask import Flask, render_template, request, redirect, flash, session
from mysqlconnection import connectToMySQL
from flask_bcrypt import Bcrypt
import re
app = Flask(__name__)
bcrypt = Bcrypt(app)
app.secret_key = "something secret10"
DATABASE = "exam_quote_dash"
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
#users
# id_users, first_name, last_name, email, password
#quotes
#id_quotes, from_user, liked_from, content, author
@app.route("/")
def signin():
return render_template("index.html")
@app.route("/register", methods=["POST"])
def register():
is_valid = True
if len(request.form['first_name']) < 2:
is_valid = False
flash("please enter your first name.")
if len(request.form['last_name']) < 2:
is_valid = False
flash("please enter your last name.")
    if not EMAIL_REGEX.match(request.form['email']):
        is_valid = False
        flash("Invalid email address!")
    if len(request.form['password']) < 8:
        is_valid = False
        flash("password must be at least 8 characters long.")
if (request.form['password'] != request.form['confirm_password']):
is_valid = False
flash("passwords do not match.")
if not is_valid:
return redirect('/')
else:
flash("sucessfully added")
mysql = connectToMySQL(DATABASE)
pw_hash = bcrypt.generate_password_hash(request.form['password'])
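    # only the bcrypt hash is stored; the plain-text password never reaches the database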
query = "INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);"
data = {
'em': request.form['email'],
'pw': pw_hash,
'fn': request.form['first_name'],
'ln': request.form['last_name']
}
id_users = mysql.query_db(query,data)
session['id_users'] = id_users
session['greeting'] = request.form['first_name']
return redirect('/quotes')
@app.route('/login', methods=['POST'])
def login():
mysql = connectToMySQL(DATABASE)
query = "SELECT * FROM users WHERE email = %(em)s;"
data = {
'em': request.form['email']
}
result = mysql.query_db(query, data)
if len(result) > 0:
if bcrypt.check_password_hash(result[0]['password'], request.form['password']):
session['id_users'] = result[0]['id_users']
session['greeting'] = result[0]['first_name']
return redirect('/quotes')
else:
flash("Email and/or password does not match.")
return redirect('/')
else:
flash("Please enter your registered Email.")
return redirect('/')
@app.route('/success')
def success():
if 'id_users' not in session:
return redirect('/')
else:
return render_template('success.html')
@app.route('/quotes')
def quotes():
mysql = connectToMySQL(DATABASE)
query = "SELECT * FROM quotes JOIN users ON from_user = id_users;"
join = mysql.query_db(query)
return render_template('quotes.html', joined = join)
@app.route('/create', methods=['POST'])
def create():
is_valid = True
    if len(request.form['content']) < 10:
        flash("quotes are required to be at least 10 characters long.")
        is_valid = False
    if is_valid:
mysql = connectToMySQL(DATABASE)
query = "INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);"
data = {
'quo': request.form['content'],
'auth': request.form['author'],
'from': session['id_users']
}
mysql.query_db(query, data)
return redirect('/quotes')
@app.route('/delete/<id>/<thing>')
def delete(id,thing):
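    # <thing> is the quote owner's user id passed from the template; only that owner may delete the quote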
if session['id_users'] == int(thing):
mysql = connectToMySQL(DATABASE)
query = "DELETE FROM quotes WHERE id_quotes = %(id)s;"
data = {
'id': id
}
mysql.query_db(query, data)
return redirect('/quotes')
else:
flash("Unable to delete other's quotes")
return redirect('/quotes')
@app.route("/edit")
def edit():
mysql = connectToMySQL(DATABASE)
query = "SELECT * From users WHERE id_users = %(id)s"
data ={
'id' : session['id_users']
}
users_table = mysql.query_db(query, data)
return render_template('edit_account.html', users = users_table)
@app.route("/update", methods=["POST"])
def update():
is_valid = True
if len(request.form['f_name']) < 3:
is_valid = False
flash("please enter your first name.")
if len(request.form['l_name']) < 3:
is_valid = False
flash("please enter your last name.")
    if not EMAIL_REGEX.match(request.form['email']):
        is_valid = False
        flash("Invalid email address!")
if not is_valid:
return redirect('/edit')
else:
flash("sucessfully updated")
mysql = connectToMySQL(DATABASE)
query = "UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;"
data = {
"fn": request.form["f_name"],
"ln": request.form["l_name"],
"em": request.form["email"],
'id' : session['id_users']
}
id = mysql.query_db(query, data)
session['greeting'] = request.form['f_name']
return redirect('/quotes')
@app.route("/my_posts")
def my_post():
mysql = connectToMySQL(DATABASE)
query = "SELECT * FROM quotes WHERE from_user = %(id)s;"
data ={
'id' : session['id_users']
}
my_quotes = mysql.query_db(query, data)
return render_template('my_posts.html', quotes = my_quotes)
@app.route('/logout')
def logout():
session.clear()
return redirect('/')
if __name__=="__main__":
app.run(debug=True)
|
normal
|
{
"blob_id": "e732fa0e2b377a87b8b088303b277cc08cb695b3",
"index": 5279,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef signin():\n return render_template('index.html')\n\n\n<mask token>\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\n<mask token>\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\[email protected]('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\[email protected]('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef signin():\n return render_template('index.html')\n\n\n<mask token>\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\[email protected]('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'\n join = mysql.query_db(query)\n return render_template('quotes.html', joined=join)\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\[email protected]('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\[email protected]('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/')\ndef signin():\n return render_template('index.html')\n\n\[email protected]('/register', methods=['POST'])\ndef register():\n is_valid = True\n if len(request.form['first_name']) < 2:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['last_name']) < 2:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if len(request.form['password']) < 8:\n is_valid = False\n flash('password must be atleast 8 characters long.')\n if request.form['password'] != request.form['confirm_password']:\n is_valid = False\n flash('passwords do not match.')\n if not is_valid:\n return redirect('/')\n else:\n flash('sucessfully added')\n mysql = connectToMySQL(DATABASE)\n pw_hash = bcrypt.generate_password_hash(request.form['password'])\n query = (\n 'INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);'\n )\n data = {'em': request.form['email'], 'pw': pw_hash, 'fn': request.form[\n 'first_name'], 'ln': request.form['last_name']}\n id_users = mysql.query_db(query, data)\n session['id_users'] = id_users\n session['greeting'] = request.form['first_name']\n return redirect('/quotes')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\[email protected]('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'\n join = mysql.query_db(query)\n return render_template('quotes.html', joined=join)\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\[email protected]('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n 
is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\[email protected]('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "<mask token>\napp = Flask(__name__)\nbcrypt = Bcrypt(app)\napp.secret_key = 'something secret10'\nDATABASE = 'exam_quote_dash'\nEMAIL_REGEX = re.compile('^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\\\.[a-zA-Z]+$')\n\n\[email protected]('/')\ndef signin():\n return render_template('index.html')\n\n\[email protected]('/register', methods=['POST'])\ndef register():\n is_valid = True\n if len(request.form['first_name']) < 2:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['last_name']) < 2:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if len(request.form['password']) < 8:\n is_valid = False\n flash('password must be atleast 8 characters long.')\n if request.form['password'] != request.form['confirm_password']:\n is_valid = False\n flash('passwords do not match.')\n if not is_valid:\n return redirect('/')\n else:\n flash('sucessfully added')\n mysql = connectToMySQL(DATABASE)\n pw_hash = bcrypt.generate_password_hash(request.form['password'])\n query = (\n 'INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);'\n )\n data = {'em': request.form['email'], 'pw': pw_hash, 'fn': request.form[\n 'first_name'], 'ln': request.form['last_name']}\n id_users = mysql.query_db(query, data)\n session['id_users'] = id_users\n session['greeting'] = request.form['first_name']\n return redirect('/quotes')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\[email protected]('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'\n join = mysql.query_db(query)\n return render_template('quotes.html', joined=join)\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\[email protected]('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': 
session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\[email protected]('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, render_template, request, redirect, flash, session\nfrom mysqlconnection import connectToMySQL\nfrom flask_bcrypt import Bcrypt\nimport re\n\napp = Flask(__name__)\nbcrypt = Bcrypt(app)\napp.secret_key = \"something secret10\"\nDATABASE = \"exam_quote_dash\"\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$') \n\n#users\n# id_users, first_name, last_name, email, password\n\n#quotes\n#id_quotes, from_user, liked_from, content, author\n\[email protected](\"/\")\ndef signin():\n return render_template(\"index.html\")\n\[email protected](\"/register\", methods=[\"POST\"])\ndef register():\n is_valid = True\n if len(request.form['first_name']) < 2:\n \tis_valid = False\n \tflash(\"please enter your first name.\")\n if len(request.form['last_name']) < 2:\n \tis_valid = False\n \tflash(\"please enter your last name.\")\n if not EMAIL_REGEX.match(request.form['email']):\n flash(\"Invalid email address!\")\n if len(request.form['password']) < 8:\n \tis_valid = False\n \tflash(\"password must be atleast 8 characters long.\")\n if (request.form['password'] != request.form['confirm_password']):\n \tis_valid = False\n \tflash(\"passwords do not match.\")\n if not is_valid:\n return redirect('/')\n else:\n flash(\"sucessfully added\")\n mysql = connectToMySQL(DATABASE)\n pw_hash = bcrypt.generate_password_hash(request.form['password'])\n query = \"INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);\"\n data = {\n 'em': request.form['email'],\n 'pw': pw_hash,\n 'fn': request.form['first_name'],\n 'ln': request.form['last_name']\n }\n id_users = mysql.query_db(query,data)\n session['id_users'] = id_users\n session['greeting'] = request.form['first_name'] \n\n return redirect('/quotes')\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * FROM users WHERE email = %(em)s;\"\n data = {\n 'em': request.form['email']\n }\n result = mysql.query_db(query, data)\n\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form['password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash(\"Email and/or password does not match.\")\n return redirect('/')\n else:\n flash(\"Please enter your registered Email.\")\n return redirect('/')\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\[email protected]('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * FROM quotes JOIN users ON from_user = id_users;\"\n join = mysql.query_db(query)\n\n return render_template('quotes.html', joined = join)\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n\n if len(request.form['content']) < 10:\n flash(\"quotes are required to be longer than 10 characters.\")\n is_valid == False\n\n if is_valid == True: \n mysql = connectToMySQL(DATABASE)\n query = \"INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);\"\n data = {\n 'quo': request.form['content'],\n 'auth': request.form['author'],\n\n 'from': session['id_users']\n }\n mysql.query_db(query, data)\n\n return redirect('/quotes')\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id,thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = \"DELETE FROM quotes WHERE 
id_quotes = %(id)s;\"\n data = {\n 'id': id\n } \n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\[email protected](\"/edit\")\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * From users WHERE id_users = %(id)s\"\n data ={ \n 'id' : session['id_users']\n }\n users_table = mysql.query_db(query, data)\n\n\n return render_template('edit_account.html', users = users_table)\n\[email protected](\"/update\", methods=[\"POST\"])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n \tis_valid = False\n \tflash(\"please enter your first name.\")\n if len(request.form['l_name']) < 3:\n \tis_valid = False\n \tflash(\"please enter your last name.\")\n if not EMAIL_REGEX.match(request.form['email']):\n flash(\"Invalid email address!\")\n if not is_valid:\n return redirect('/edit')\n else:\n flash(\"sucessfully updated\")\n mysql = connectToMySQL(DATABASE)\n query = \"UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;\"\n data = {\n \"fn\": request.form[\"f_name\"],\n \"ln\": request.form[\"l_name\"],\n \"em\": request.form[\"email\"],\n 'id' : session['id_users']\n }\n id = mysql.query_db(query, data)\n\n session['greeting'] = request.form['f_name'] \n return redirect('/quotes')\n\[email protected](\"/my_posts\")\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * FROM quotes WHERE from_user = %(id)s;\"\n data ={ \n 'id' : session['id_users']\n }\n my_quotes = mysql.query_db(query, data)\n\n return render_template('my_posts.html', quotes = my_quotes)\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\nif __name__==\"__main__\": \n app.run(debug=True) ",
"step-ids": [
9,
10,
12,
13,
15
]
}
|
[
9,
10,
12,
13,
15
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from classifier import classifier
from get_input_args import get_input_args
from os import listdir
# */AIPND-revision/intropyproject-classify-pet-images/calculates_results_stats_hints.py
#
# PROGRAMMER:
# DATE CREATED:
# REVISED DATE:
# PURPOSE: This is a *hints* file to help guide students in creating the
# function calculates_results_stats that calculates the statistics
#           of the results of the program run using the classifier's model
# architecture to classify the images. This function will use the
# results in the results dictionary to calculate these statistics.
# This function will then put the results statistics in a dictionary
# (results_stats_dic) that's created and returned by this function.
# This will allow the user of the program to determine the 'best'
# model for classifying the images. The statistics that are calculated
# will be counts and percentages. Please see "Intro to Python - Project
# classifying Images - xx Calculating Results" for details on the
# how to calculate the counts and percentages for this function.
# This function inputs:
# - The results dictionary as results_dic within calculates_results_stats
# function and results for the function call within main.
# This function creates and returns the Results Statistics Dictionary -
# results_stats_dic. This dictionary contains the results statistics
# (either a percentage or a count) where the key is the statistic's
# name (starting with 'pct' for percentage or 'n' for count) and value
# is the statistic's value. This dictionary should contain the
# following keys:
# n_images - number of images
# n_dogs_img - number of dog images
# n_notdogs_img - number of NON-dog images
# n_match - number of matches between pet & classifier labels
# n_correct_dogs - number of correctly classified dog images
# n_correct_notdogs - number of correctly classified NON-dog images
# n_correct_breed - number of correctly classified dog breeds
# pct_match - percentage of correct matches
# pct_correct_dogs - percentage of correctly classified dogs
# pct_correct_breed - percentage of correctly classified dog breeds
# pct_correct_notdogs - percentage of correctly classified NON-dogs
#
##
# TODO 5: EDIT and ADD code BELOW to do the following that's stated in the
# comments below that start with "TODO: 5" for the calculates_results_stats
# function. Please be certain to replace None in the return statement with
# the results_stats_dic dictionary that you create with this function
#
def calculates_results_stats(results_dic):
"""
    Calculates statistics of the results of the program run using the classifier's model
    architecture to classify pet images. Then puts the results statistics in a
    dictionary (results_stats_dic) so that it's returned for printing, so as to help
the user to determine the 'best' model for classifying images. Note that
the statistics calculated as the results are either percentages or counts.
Parameters:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
                              classifier labels and 0 = no match between labels
idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
idx 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
Returns:
results_stats_dic - Dictionary that contains the results statistics (either
a percentage or a count) where the key is the statistic's
name (starting with 'pct' for percentage or 'n' for count)
and the value is the statistic's value. See comments above
and the classroom Item XX Calculating Results for details
on how to calculate the counts and statistics.
"""
# Creates empty dictionary for results_stats_dic
results_stats_dic = dict()
# Sets all counters to initial values of zero so that they can
# be incremented while processing through the images in results_dic
results_stats_dic['n_dogs_img'] = 0
results_stats_dic['n_match'] = 0
results_stats_dic['n_correct_dogs'] = 0
results_stats_dic['n_correct_notdogs'] = 0
results_stats_dic['n_correct_breed'] = 0
# process through the results dictionary
for key in results_dic:
# Labels Match Exactly
if results_dic[key][2] == 1:
results_stats_dic['n_match'] += 1
# TODO: 5a. REPLACE pass with CODE that counts how many pet images of
# dogs had their breed correctly classified. This happens
# when the pet image label indicates the image is-a-dog AND
# the pet image label and the classifier label match. You
# will need to write a conditional statement that determines
# when the dog breed is correctly classified and then
# increments 'n_correct_breed' by 1. Recall 'n_correct_breed'
# is a key in the results_stats_dic dictionary with it's value
# representing the number of correctly classified dog breeds.
#
# Pet Image Label is a Dog AND Labels match- counts Correct Breed
if results_dic[key][3] == 1 and results_dic[key][2] == 1:
results_stats_dic['n_correct_breed'] += 1
# Pet Image Label is a Dog - counts number of dog images
if results_dic[key][3] == 1:
results_stats_dic['n_dogs_img'] += 1
# Classifier classifies image as Dog (& pet image is a dog)
# counts number of correct dog classifications
if results_dic[key][4] == 1:
results_stats_dic['n_correct_dogs'] += 1
# TODO: 5b. REPLACE pass with CODE that counts how many pet images
# that are NOT dogs were correctly classified. This happens
# when the pet image label indicates the image is-NOT-a-dog
# AND the classifier label indicates the images is-NOT-a-dog.
# You will need to write a conditional statement that
# determines when the classifier label indicates the image
# is-NOT-a-dog and then increments 'n_correct_notdogs' by 1.
# Recall the 'else:' above 'pass' already indicates that the
# pet image label indicates the image is-NOT-a-dog and
# 'n_correct_notdogs' is a key in the results_stats_dic dictionary
# with it's value representing the number of correctly
# classified NOT-a-dog images.
#
# Pet Image Label is NOT a Dog
else:
# Classifier classifies image as NOT a Dog(& pet image isn't a dog)
            # counts number of correct NOT dog classifications.
if results_dic[key][3] == 0 and results_dic[key][4] == 0:
results_stats_dic['n_correct_notdogs'] += 1
# Calculates run statistics (counts & percentages) below that are calculated
# using the counters from above.
# calculates number of total images
results_stats_dic['n_images'] = len(results_dic)
# calculates number of not-a-dog images using - images & dog images counts
results_stats_dic['n_notdogs_img'] = (results_stats_dic['n_images'] -
results_stats_dic['n_dogs_img'])
# TODO: 5c. REPLACE zero(0.0) with CODE that calculates the % of correctly
# matched images. Recall that this can be calculated by the
# number of correctly matched images ('n_match') divided by the
# number of images('n_images'). This result will need to be
# multiplied by 100.0 to provide the percentage.
#
# Calculates % correct for matches
results_stats_dic['pct_match'] = (results_stats_dic['n_match'] / results_stats_dic['n_images']) * 100
# TODO: 5d. REPLACE zero(0.0) with CODE that calculates the % of correctly
# classified dog images. Recall that this can be calculated by
# the number of correctly classified dog images('n_correct_dogs')
# divided by the number of dog images('n_dogs_img'). This result
# will need to be multiplied by 100.0 to provide the percentage.
#
# Calculates % correct dogs
results_stats_dic['pct_correct_dogs'] = (results_stats_dic['n_correct_dogs'] / results_stats_dic['n_dogs_img']) * 100
# TODO: 5e. REPLACE zero(0.0) with CODE that calculates the % of correctly
# classified breeds of dogs. Recall that this can be calculated
# by the number of correctly classified breeds of dog('n_correct_breed')
# divided by the number of dog images('n_dogs_img'). This result
# will need to be multiplied by 100.0 to provide the percentage.
#
# Calculates % correct breed of dog
results_stats_dic['pct_correct_breed'] = (results_stats_dic['n_correct_breed'] / results_stats_dic['n_dogs_img']) * 100
# Calculates % correct not-a-dog images
# Uses conditional statement for when no 'not a dog' images were submitted
if results_stats_dic['n_notdogs_img'] > 0:
results_stats_dic['pct_correct_notdogs'] = (results_stats_dic['n_correct_notdogs'] /
results_stats_dic['n_notdogs_img']) * 100.0
else:
results_stats_dic['pct_correct_notdogs'] = 0.0
# TODO 5f. REPLACE None with the results_stats_dic dictionary that you
# created with this function
return results_stats_dic
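
# Quick sanity check with hypothetical numbers: for 4 images (3 dogs, 1 not-a-dog) with
# 2 exact label matches (both on dog images), 3 correctly classified dogs, 2 correct
# breeds and 1 correct not-a-dog, the function returns pct_match = 50.0,
# pct_correct_dogs = 100.0, pct_correct_breed ~ 66.7 and pct_correct_notdogs = 100.0.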
#----------------------------------------------------------------------------------------------------
# METHODS FROM OTHER LESSONS
#----------------------------------------------------------------------------------------------------
def adjust_results4_isadog(results_dic, dogfile):
"""
Adjusts the results dictionary to determine if classifier correctly
classified images 'as a dog' or 'not a dog' especially when not a match.
Demonstrates if model architecture correctly classifies dog images even if
it gets dog breed wrong (not a match).
Parameters:
results_dic - Dictionary with 'key' as image filename and 'value' as a
List. Where the list will contain the following items:
index 0 = pet image label (string)
index 1 = classifier label (string)
index 2 = 1/0 (int) where 1 = match between pet image
                             and classifier labels and 0 = no match between labels
------ where index 3 & index 4 are added by this function -----
NEW - index 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
NEW - index 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
dogfile - A text file that contains names of all dogs from the classifier
function and dog names from the pet image files. This file has
one dog name per line dog names are all in lowercase with
spaces separating the distinct words of the dog name. Dog names
from the classifier function can be a string of dog names separated
by commas when a particular breed of dog has multiple dog names
associated with that breed (ex. maltese dog, maltese terrier,
maltese) (string - indicates text file's filename)
Returns:
None - results_dic is mutable data type so no return needed.
"""
# Creates dognames dictionary for quick matching to results_dic labels from
# real answer & classifier's answer
dognames_dic = dict()
# Reads in dognames from file, 1 name per line & automatically closes file
with open(dogfile, "r") as infile:
# Reads in dognames from first line in file
line = infile.readline()
# Processes each line in file until reaching EOF (end-of-file) by
# processing line and adding dognames to dognames_dic with while loop
while line != "":
# print("----- line: {}".format(line))
# TODO: 4a. REPLACE pass with CODE to remove the newline character
# from the variable line
#
# Process line by striping newline from line
line = line.strip('\n')
# TODO: 4b. REPLACE pass with CODE to check if the dogname(line)
# exists within dognames_dic, then if the dogname(line)
# doesn't exist within dognames_dic then add the dogname(line)
# to dognames_dic as the 'key' with the 'value' of 1.
#
# adds dogname(line) to dogsnames_dic if it doesn't already exist
# in the dogsnames_dic dictionary
if line not in dognames_dic:
dognames_dic[line] = 1
# print("----- dognames_dic[{}]: {}".format(line, dognames_dic[line]))
# Reads in next line in file to be processed with while loop
# if this line isn't empty (EOF)
line = infile.readline()
# Add to whether pet labels & classifier labels are dogs by appending
# two items to end of value(List) in results_dic.
# List Index 3 = whether(1) or not(0) Pet Image Label is a dog AND
# List Index 4 = whether(1) or not(0) Classifier Label is a dog
# How - iterate through results_dic if labels are found in dognames_dic
# then label "is a dog" index3/4=1 otherwise index3/4=0 "not a dog"
for key in results_dic:
# Pet Image Label IS of Dog (e.g. found in dognames_dic)
if results_dic[key][0] in dognames_dic:
# Classifier Label IS image of Dog (e.g. found in dognames_dic)
# appends (1, 1) because both labels are dogs
if results_dic[key][1] in dognames_dic:
results_dic[key].extend((1, 1))
# ('cat_01.jpg', ['cat', 'lynx', 0])
# ('Poodle_07927.jpg', ['poodle', 'standard poodle, poodle', 1])
# TODO: 4c. REPLACE pass BELOW with CODE that adds the following to
# results_dic dictionary for the key indicated by the
# variable key - append (1,0) to the value using
# the extend list function. This indicates
# the pet label is-a-dog, classifier label is-NOT-a-dog.
#
# Classifier Label IS NOT image of dog (e.g. NOT in dognames_dic)
# appends (1,0) because only pet label is a dog
else:
results_dic[key].extend((1, 0))
# Pet Image Label IS NOT a Dog image (e.g. NOT found in dognames_dic)
else:
# TODO: 4d. REPLACE pass BELOW with CODE that adds the following to
# results_dic dictionary for the key indicated by the
# variable key - append (0,1) to the value uisng
# the extend list function. This indicates
# the pet label is-NOT-a-dog, classifier label is-a-dog.
#
# Classifier Label IS image of Dog (e.g. found in dognames_dic)
            # appends (0, 1) because only the classifier label is a dog
if results_dic[key][1] in dognames_dic:
results_dic[key].extend((0, 1))
# TODO: 4e. REPLACE pass BELOW with CODE that adds the following to
# results_dic dictionary for the key indicated by the
# variable key - append (0,0) to the value using the
# extend list function. This indicates
# the pet label is-NOT-a-dog, classifier label is-NOT-a-dog.
#
# Classifier Label IS NOT image of Dog (e.g. NOT in dognames_dic)
# appends (0, 0) because both labels aren't dogs
else:
results_dic[key].extend((0, 0))
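    # Summary of the two appended flags (index 3, index 4): (1, 1) both labels are dogs,
    # (1, 0) only the pet label is a dog, (0, 1) only the classifier label is a dog,
    # (0, 0) neither label is a dog.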
def classify_images(images_dir, results_dic, model):
"""
Creates classifier labels with classifier function, compares pet labels to
the classifier labels, and adds the classifier label and the comparison of
the labels to the results dictionary using the extend function. Be sure to
format the classifier labels so that they will match your pet image labels.
The format will include putting the classifier labels in all lower case
letters and strip the leading and trailing whitespace characters from them.
For example, the Classifier function returns = 'Maltese dog, Maltese terrier, Maltese'
so the classifier label = 'maltese dog, maltese terrier, maltese'.
Recall that dog names from the classifier function can be a string of dog
names separated by commas when a particular breed of dog has multiple dog
names associated with that breed. For example, you will find pet images of
a 'dalmatian'(pet label) and it will match to the classifier label
'dalmatian, coach dog, carriage dog' if the classifier function correctly
classified the pet images of dalmatians.
PLEASE NOTE: This function uses the classifier() function defined in
classifier.py within this function. The proper use of this function is
in test_classifier.py Please refer to this program prior to using the
classifier() function to classify images within this function
Parameters:
images_dir - The (full) path to the folder of images that are to be
classified by the classifier function (string)
results_dic - Results Dictionary with 'key' as image filename and 'value'
as a List. Where the list will contain the following items:
index 0 = pet image label (string)
--- where index 1 & index 2 are added by this function ---
NEW - index 1 = classifier label (string)
NEW - index 2 = 1/0 (int) where 1 = match between pet image
                             and classifier labels and 0 = no match between labels
model - Indicates which CNN model architecture will be used by the
classifier function to classify the pet images,
values must be either: resnet alexnet vgg (string)
Returns:
None - results_dic is mutable data type so no return needed.
"""
# None
first_filename_list = listdir("pet_images/")
filename_list = []
for idx in range(0, len(first_filename_list), 1):
if not first_filename_list[idx].startswith('.'):
filename_list.append(first_filename_list[idx])
idx = 0
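    # NOTE: pairing filename_list[idx] with the results_dic keys assumes listdir() returns
    # the files in the same order main() used when building results_dic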
for key in results_dic:
# print("---------------")
value=results_dic[key]
# print("\t-----key={}".format(key))
# print("\t-----value={}".format(value))
path = images_dir + filename_list[idx]
# print("\t-----path={}".format(path))
model_label = classifier(path, model)
model_label = model_label.lower()
model_label = model_label.strip()
# print("\t-----model_label={}".format(model_label))
truth = 0
if value in model_label:
truth = 1
results_dic[key] = [ value, model_label, truth ]
# print("\t-----truth={}".format(truth))
idx = idx + 1
def get_pet_label(pet_image):
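    # e.g. "Boston_terrier_02259.jpg" -> "boston terrier": keeps only alphabetic words, lowercased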
# Sets string to lower case letters
low_pet_image = pet_image.lower()
# Splits lower case string by _ to break into words
word_list_pet_image = low_pet_image.split("_")
# Create pet_name starting as empty string
pet_name = ""
# Loops to check if word in pet name is only alphabetic characters -
# if true append word to pet_name separated by trailing space
for word in word_list_pet_image:
if word.isalpha():
pet_name += word + " "
# Strip off starting/trailing whitespace characters
pet_name = pet_name.strip()
# Returns resulting pet_name
return pet_name
def print_dict(dict):
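    # prints each (key, value) pair of the dictionary on its own line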
for item in dict.items():
print(item)
def main():
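    # Pipeline: parse command-line args, build pet labels from image filenames,
    # classify with the chosen CNN, flag dog/not-dog, then compute and print stats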
in_arg = get_input_args()
first_filename_list = listdir("pet_images/")
filename_list = []
for idx in range(0, len(first_filename_list), 1):
if not first_filename_list[idx].startswith('.'):
filename_list.append(first_filename_list[idx])
results_dic = dict()
for idx in range(0, len(filename_list), 1):
if filename_list[idx] not in results_dic:
results_dic[filename_list[idx]] = get_pet_label(filename_list[idx])
classify_images(in_arg.dir, results_dic, in_arg.arch)
adjust_results4_isadog(results_dic, in_arg.dogfile)
results_dic_output = calculates_results_stats(results_dic)
print_dict(results_dic_output)
#----------------------------------------------------------------------------------------------------
main()
|
normal
|
{
"blob_id": "f96c9753f3cbb0e554f9f05591e23943009c8955",
"index": 2371,
"step-1": "<mask token>\n\n\ndef calculates_results_stats(results_dic):\n \"\"\"\n Calculates statistics of the results of the program run using classifier's model \n architecture to classifying pet images. Then puts the results statistics in a \n dictionary (results_stats_dic) so that it's returned for printing as to help\n the user to determine the 'best' model for classifying images. Note that \n the statistics calculated as the results are either percentages or counts.\n Parameters:\n results_dic - Dictionary with key as image filename and value as a List \n (index)idx 0 = pet image label (string)\n idx 1 = classifier label (string)\n idx 2 = 1/0 (int) where 1 = match between pet image and \n classifer labels and 0 = no match between labels\n idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and \n 0 = pet Image 'is-NOT-a' dog. \n idx 4 = 1/0 (int) where 1 = Classifier classifies image \n 'as-a' dog and 0 = Classifier classifies image \n 'as-NOT-a' dog.\n Returns:\n results_stats_dic - Dictionary that contains the results statistics (either\n a percentage or a count) where the key is the statistic's \n name (starting with 'pct' for percentage or 'n' for count)\n and the value is the statistic's value. See comments above\n and the classroom Item XX Calculating Results for details\n on how to calculate the counts and statistics.\n \"\"\"\n results_stats_dic = dict()\n results_stats_dic['n_dogs_img'] = 0\n results_stats_dic['n_match'] = 0\n results_stats_dic['n_correct_dogs'] = 0\n results_stats_dic['n_correct_notdogs'] = 0\n results_stats_dic['n_correct_breed'] = 0\n for key in results_dic:\n if results_dic[key][2] == 1:\n results_stats_dic['n_match'] += 1\n if results_dic[key][3] == 1 and results_dic[key][2] == 1:\n results_stats_dic['n_correct_breed'] += 1\n if results_dic[key][3] == 1:\n results_stats_dic['n_dogs_img'] += 1\n if results_dic[key][4] == 1:\n results_stats_dic['n_correct_dogs'] += 1\n elif results_dic[key][3] == 0 and results_dic[key][4] == 0:\n results_stats_dic['n_correct_notdogs'] += 1\n results_stats_dic['n_images'] = len(results_dic)\n results_stats_dic['n_notdogs_img'] = results_stats_dic['n_images'\n ] - results_stats_dic['n_dogs_img']\n results_stats_dic['pct_match'] = results_stats_dic['n_match'\n ] / results_stats_dic['n_images'] * 100\n results_stats_dic['pct_correct_dogs'] = results_stats_dic['n_correct_dogs'\n ] / results_stats_dic['n_dogs_img'] * 100\n results_stats_dic['pct_correct_breed'] = results_stats_dic[\n 'n_correct_breed'] / results_stats_dic['n_dogs_img'] * 100\n if results_stats_dic['n_notdogs_img'] > 0:\n results_stats_dic['pct_correct_notdogs'] = results_stats_dic[\n 'n_correct_notdogs'] / results_stats_dic['n_notdogs_img'] * 100.0\n else:\n results_stats_dic['pct_correct_notdogs'] = 0.0\n return results_stats_dic\n\n\n<mask token>\n\n\ndef get_pet_label(pet_image):\n low_pet_image = pet_image.lower()\n word_list_pet_image = low_pet_image.split('_')\n pet_name = ''\n for word in word_list_pet_image:\n if word.isalpha():\n pet_name += word + ' '\n pet_name = pet_name.strip()\n return pet_name\n\n\ndef print_dict(dict):\n for item in dict.items():\n print(item)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calculates_results_stats(results_dic):\n \"\"\"\n Calculates statistics of the results of the program run using classifier's model \n architecture to classifying pet images. Then puts the results statistics in a \n dictionary (results_stats_dic) so that it's returned for printing as to help\n the user to determine the 'best' model for classifying images. Note that \n the statistics calculated as the results are either percentages or counts.\n Parameters:\n results_dic - Dictionary with key as image filename and value as a List \n (index)idx 0 = pet image label (string)\n idx 1 = classifier label (string)\n idx 2 = 1/0 (int) where 1 = match between pet image and \n classifer labels and 0 = no match between labels\n idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and \n 0 = pet Image 'is-NOT-a' dog. \n idx 4 = 1/0 (int) where 1 = Classifier classifies image \n 'as-a' dog and 0 = Classifier classifies image \n 'as-NOT-a' dog.\n Returns:\n results_stats_dic - Dictionary that contains the results statistics (either\n a percentage or a count) where the key is the statistic's \n name (starting with 'pct' for percentage or 'n' for count)\n and the value is the statistic's value. See comments above\n and the classroom Item XX Calculating Results for details\n on how to calculate the counts and statistics.\n \"\"\"\n results_stats_dic = dict()\n results_stats_dic['n_dogs_img'] = 0\n results_stats_dic['n_match'] = 0\n results_stats_dic['n_correct_dogs'] = 0\n results_stats_dic['n_correct_notdogs'] = 0\n results_stats_dic['n_correct_breed'] = 0\n for key in results_dic:\n if results_dic[key][2] == 1:\n results_stats_dic['n_match'] += 1\n if results_dic[key][3] == 1 and results_dic[key][2] == 1:\n results_stats_dic['n_correct_breed'] += 1\n if results_dic[key][3] == 1:\n results_stats_dic['n_dogs_img'] += 1\n if results_dic[key][4] == 1:\n results_stats_dic['n_correct_dogs'] += 1\n elif results_dic[key][3] == 0 and results_dic[key][4] == 0:\n results_stats_dic['n_correct_notdogs'] += 1\n results_stats_dic['n_images'] = len(results_dic)\n results_stats_dic['n_notdogs_img'] = results_stats_dic['n_images'\n ] - results_stats_dic['n_dogs_img']\n results_stats_dic['pct_match'] = results_stats_dic['n_match'\n ] / results_stats_dic['n_images'] * 100\n results_stats_dic['pct_correct_dogs'] = results_stats_dic['n_correct_dogs'\n ] / results_stats_dic['n_dogs_img'] * 100\n results_stats_dic['pct_correct_breed'] = results_stats_dic[\n 'n_correct_breed'] / results_stats_dic['n_dogs_img'] * 100\n if results_stats_dic['n_notdogs_img'] > 0:\n results_stats_dic['pct_correct_notdogs'] = results_stats_dic[\n 'n_correct_notdogs'] / results_stats_dic['n_notdogs_img'] * 100.0\n else:\n results_stats_dic['pct_correct_notdogs'] = 0.0\n return results_stats_dic\n\n\n<mask token>\n\n\ndef classify_images(images_dir, results_dic, model):\n \"\"\"\n Creates classifier labels with classifier function, compares pet labels to \n the classifier labels, and adds the classifier label and the comparison of \n the labels to the results dictionary using the extend function. 
Be sure to\n format the classifier labels so that they will match your pet image labels.\n The format will include putting the classifier labels in all lower case \n letters and strip the leading and trailing whitespace characters from them.\n For example, the Classifier function returns = 'Maltese dog, Maltese terrier, Maltese' \n so the classifier label = 'maltese dog, maltese terrier, maltese'.\n Recall that dog names from the classifier function can be a string of dog \n names separated by commas when a particular breed of dog has multiple dog \n names associated with that breed. For example, you will find pet images of\n a 'dalmatian'(pet label) and it will match to the classifier label \n 'dalmatian, coach dog, carriage dog' if the classifier function correctly \n classified the pet images of dalmatians.\n PLEASE NOTE: This function uses the classifier() function defined in \n classifier.py within this function. The proper use of this function is\n in test_classifier.py Please refer to this program prior to using the \n classifier() function to classify images within this function \n Parameters: \n images_dir - The (full) path to the folder of images that are to be\n classified by the classifier function (string)\n results_dic - Results Dictionary with 'key' as image filename and 'value'\n as a List. Where the list will contain the following items: \n index 0 = pet image label (string)\n --- where index 1 & index 2 are added by this function ---\n NEW - index 1 = classifier label (string)\n NEW - index 2 = 1/0 (int) where 1 = match between pet image\n and classifer labels and 0 = no match between labels\n model - Indicates which CNN model architecture will be used by the \n classifier function to classify the pet images,\n values must be either: resnet alexnet vgg (string)\n Returns:\n None - results_dic is mutable data type so no return needed. \n \"\"\"\n first_filename_list = listdir('pet_images/')\n filename_list = []\n for idx in range(0, len(first_filename_list), 1):\n if not first_filename_list[idx].startswith('.'):\n filename_list.append(first_filename_list[idx])\n idx = 0\n for key in results_dic:\n value = results_dic[key]\n path = images_dir + filename_list[idx]\n model_label = classifier(path, model)\n model_label = model_label.lower()\n model_label = model_label.strip()\n truth = 0\n if value in model_label:\n truth = 1\n results_dic[key] = [value, model_label, truth]\n idx = idx + 1\n\n\ndef get_pet_label(pet_image):\n low_pet_image = pet_image.lower()\n word_list_pet_image = low_pet_image.split('_')\n pet_name = ''\n for word in word_list_pet_image:\n if word.isalpha():\n pet_name += word + ' '\n pet_name = pet_name.strip()\n return pet_name\n\n\ndef print_dict(dict):\n for item in dict.items():\n print(item)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calculates_results_stats(results_dic):\n \"\"\"\n Calculates statistics of the results of the program run using classifier's model \n architecture to classifying pet images. Then puts the results statistics in a \n dictionary (results_stats_dic) so that it's returned for printing as to help\n the user to determine the 'best' model for classifying images. Note that \n the statistics calculated as the results are either percentages or counts.\n Parameters:\n results_dic - Dictionary with key as image filename and value as a List \n (index)idx 0 = pet image label (string)\n idx 1 = classifier label (string)\n idx 2 = 1/0 (int) where 1 = match between pet image and \n classifer labels and 0 = no match between labels\n idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and \n 0 = pet Image 'is-NOT-a' dog. \n idx 4 = 1/0 (int) where 1 = Classifier classifies image \n 'as-a' dog and 0 = Classifier classifies image \n 'as-NOT-a' dog.\n Returns:\n results_stats_dic - Dictionary that contains the results statistics (either\n a percentage or a count) where the key is the statistic's \n name (starting with 'pct' for percentage or 'n' for count)\n and the value is the statistic's value. See comments above\n and the classroom Item XX Calculating Results for details\n on how to calculate the counts and statistics.\n \"\"\"\n results_stats_dic = dict()\n results_stats_dic['n_dogs_img'] = 0\n results_stats_dic['n_match'] = 0\n results_stats_dic['n_correct_dogs'] = 0\n results_stats_dic['n_correct_notdogs'] = 0\n results_stats_dic['n_correct_breed'] = 0\n for key in results_dic:\n if results_dic[key][2] == 1:\n results_stats_dic['n_match'] += 1\n if results_dic[key][3] == 1 and results_dic[key][2] == 1:\n results_stats_dic['n_correct_breed'] += 1\n if results_dic[key][3] == 1:\n results_stats_dic['n_dogs_img'] += 1\n if results_dic[key][4] == 1:\n results_stats_dic['n_correct_dogs'] += 1\n elif results_dic[key][3] == 0 and results_dic[key][4] == 0:\n results_stats_dic['n_correct_notdogs'] += 1\n results_stats_dic['n_images'] = len(results_dic)\n results_stats_dic['n_notdogs_img'] = results_stats_dic['n_images'\n ] - results_stats_dic['n_dogs_img']\n results_stats_dic['pct_match'] = results_stats_dic['n_match'\n ] / results_stats_dic['n_images'] * 100\n results_stats_dic['pct_correct_dogs'] = results_stats_dic['n_correct_dogs'\n ] / results_stats_dic['n_dogs_img'] * 100\n results_stats_dic['pct_correct_breed'] = results_stats_dic[\n 'n_correct_breed'] / results_stats_dic['n_dogs_img'] * 100\n if results_stats_dic['n_notdogs_img'] > 0:\n results_stats_dic['pct_correct_notdogs'] = results_stats_dic[\n 'n_correct_notdogs'] / results_stats_dic['n_notdogs_img'] * 100.0\n else:\n results_stats_dic['pct_correct_notdogs'] = 0.0\n return results_stats_dic\n\n\ndef adjust_results4_isadog(results_dic, dogfile):\n \"\"\"\n Adjusts the results dictionary to determine if classifier correctly \n classified images 'as a dog' or 'not a dog' especially when not a match. \n Demonstrates if model architecture correctly classifies dog images even if\n it gets dog breed wrong (not a match).\n Parameters:\n results_dic - Dictionary with 'key' as image filename and 'value' as a \n List. 
Where the list will contain the following items: \n index 0 = pet image label (string)\n index 1 = classifier label (string)\n index 2 = 1/0 (int) where 1 = match between pet image\n and classifer labels and 0 = no match between labels\n ------ where index 3 & index 4 are added by this function -----\n NEW - index 3 = 1/0 (int) where 1 = pet image 'is-a' dog and \n 0 = pet Image 'is-NOT-a' dog. \n NEW - index 4 = 1/0 (int) where 1 = Classifier classifies image \n 'as-a' dog and 0 = Classifier classifies image \n 'as-NOT-a' dog.\n dogfile - A text file that contains names of all dogs from the classifier\n function and dog names from the pet image files. This file has \n one dog name per line dog names are all in lowercase with \n spaces separating the distinct words of the dog name. Dog names\n from the classifier function can be a string of dog names separated\n by commas when a particular breed of dog has multiple dog names \n associated with that breed (ex. maltese dog, maltese terrier, \n maltese) (string - indicates text file's filename)\n Returns:\n None - results_dic is mutable data type so no return needed.\n \"\"\"\n dognames_dic = dict()\n with open(dogfile, 'r') as infile:\n line = infile.readline()\n while line != '':\n line = line.strip('\\n')\n if line not in dognames_dic:\n dognames_dic[line] = 1\n line = infile.readline()\n for key in results_dic:\n if results_dic[key][0] in dognames_dic:\n if results_dic[key][1] in dognames_dic:\n results_dic[key].extend((1, 1))\n else:\n results_dic[key].extend((1, 0))\n elif results_dic[key][1] in dognames_dic:\n results_dic[key].extend((0, 1))\n else:\n results_dic[key].extend((0, 0))\n\n\ndef classify_images(images_dir, results_dic, model):\n \"\"\"\n Creates classifier labels with classifier function, compares pet labels to \n the classifier labels, and adds the classifier label and the comparison of \n the labels to the results dictionary using the extend function. Be sure to\n format the classifier labels so that they will match your pet image labels.\n The format will include putting the classifier labels in all lower case \n letters and strip the leading and trailing whitespace characters from them.\n For example, the Classifier function returns = 'Maltese dog, Maltese terrier, Maltese' \n so the classifier label = 'maltese dog, maltese terrier, maltese'.\n Recall that dog names from the classifier function can be a string of dog \n names separated by commas when a particular breed of dog has multiple dog \n names associated with that breed. For example, you will find pet images of\n a 'dalmatian'(pet label) and it will match to the classifier label \n 'dalmatian, coach dog, carriage dog' if the classifier function correctly \n classified the pet images of dalmatians.\n PLEASE NOTE: This function uses the classifier() function defined in \n classifier.py within this function. The proper use of this function is\n in test_classifier.py Please refer to this program prior to using the \n classifier() function to classify images within this function \n Parameters: \n images_dir - The (full) path to the folder of images that are to be\n classified by the classifier function (string)\n results_dic - Results Dictionary with 'key' as image filename and 'value'\n as a List. 
Where the list will contain the following items: \n index 0 = pet image label (string)\n --- where index 1 & index 2 are added by this function ---\n NEW - index 1 = classifier label (string)\n NEW - index 2 = 1/0 (int) where 1 = match between pet image\n and classifer labels and 0 = no match between labels\n model - Indicates which CNN model architecture will be used by the \n classifier function to classify the pet images,\n values must be either: resnet alexnet vgg (string)\n Returns:\n None - results_dic is mutable data type so no return needed. \n \"\"\"\n first_filename_list = listdir('pet_images/')\n filename_list = []\n for idx in range(0, len(first_filename_list), 1):\n if not first_filename_list[idx].startswith('.'):\n filename_list.append(first_filename_list[idx])\n idx = 0\n for key in results_dic:\n value = results_dic[key]\n path = images_dir + filename_list[idx]\n model_label = classifier(path, model)\n model_label = model_label.lower()\n model_label = model_label.strip()\n truth = 0\n if value in model_label:\n truth = 1\n results_dic[key] = [value, model_label, truth]\n idx = idx + 1\n\n\ndef get_pet_label(pet_image):\n low_pet_image = pet_image.lower()\n word_list_pet_image = low_pet_image.split('_')\n pet_name = ''\n for word in word_list_pet_image:\n if word.isalpha():\n pet_name += word + ' '\n pet_name = pet_name.strip()\n return pet_name\n\n\ndef print_dict(dict):\n for item in dict.items():\n print(item)\n\n\ndef main():\n in_arg = get_input_args()\n first_filename_list = listdir('pet_images/')\n filename_list = []\n for idx in range(0, len(first_filename_list), 1):\n if not first_filename_list[idx].startswith('.'):\n filename_list.append(first_filename_list[idx])\n results_dic = dict()\n for idx in range(0, len(filename_list), 1):\n if filename_list[idx] not in results_dic:\n results_dic[filename_list[idx]] = get_pet_label(filename_list[idx])\n classify_images(in_arg.dir, results_dic, in_arg.arch)\n adjust_results4_isadog(results_dic, in_arg.dogfile)\n results_dic_output = calculates_results_stats(results_dic)\n print_dict(results_dic_output)\n\n\nmain()\n",
"step-4": "from classifier import classifier\nfrom get_input_args import get_input_args\nfrom os import listdir\n\n\ndef calculates_results_stats(results_dic):\n \"\"\"\n Calculates statistics of the results of the program run using classifier's model \n architecture to classifying pet images. Then puts the results statistics in a \n dictionary (results_stats_dic) so that it's returned for printing as to help\n the user to determine the 'best' model for classifying images. Note that \n the statistics calculated as the results are either percentages or counts.\n Parameters:\n results_dic - Dictionary with key as image filename and value as a List \n (index)idx 0 = pet image label (string)\n idx 1 = classifier label (string)\n idx 2 = 1/0 (int) where 1 = match between pet image and \n classifer labels and 0 = no match between labels\n idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and \n 0 = pet Image 'is-NOT-a' dog. \n idx 4 = 1/0 (int) where 1 = Classifier classifies image \n 'as-a' dog and 0 = Classifier classifies image \n 'as-NOT-a' dog.\n Returns:\n results_stats_dic - Dictionary that contains the results statistics (either\n a percentage or a count) where the key is the statistic's \n name (starting with 'pct' for percentage or 'n' for count)\n and the value is the statistic's value. See comments above\n and the classroom Item XX Calculating Results for details\n on how to calculate the counts and statistics.\n \"\"\"\n results_stats_dic = dict()\n results_stats_dic['n_dogs_img'] = 0\n results_stats_dic['n_match'] = 0\n results_stats_dic['n_correct_dogs'] = 0\n results_stats_dic['n_correct_notdogs'] = 0\n results_stats_dic['n_correct_breed'] = 0\n for key in results_dic:\n if results_dic[key][2] == 1:\n results_stats_dic['n_match'] += 1\n if results_dic[key][3] == 1 and results_dic[key][2] == 1:\n results_stats_dic['n_correct_breed'] += 1\n if results_dic[key][3] == 1:\n results_stats_dic['n_dogs_img'] += 1\n if results_dic[key][4] == 1:\n results_stats_dic['n_correct_dogs'] += 1\n elif results_dic[key][3] == 0 and results_dic[key][4] == 0:\n results_stats_dic['n_correct_notdogs'] += 1\n results_stats_dic['n_images'] = len(results_dic)\n results_stats_dic['n_notdogs_img'] = results_stats_dic['n_images'\n ] - results_stats_dic['n_dogs_img']\n results_stats_dic['pct_match'] = results_stats_dic['n_match'\n ] / results_stats_dic['n_images'] * 100\n results_stats_dic['pct_correct_dogs'] = results_stats_dic['n_correct_dogs'\n ] / results_stats_dic['n_dogs_img'] * 100\n results_stats_dic['pct_correct_breed'] = results_stats_dic[\n 'n_correct_breed'] / results_stats_dic['n_dogs_img'] * 100\n if results_stats_dic['n_notdogs_img'] > 0:\n results_stats_dic['pct_correct_notdogs'] = results_stats_dic[\n 'n_correct_notdogs'] / results_stats_dic['n_notdogs_img'] * 100.0\n else:\n results_stats_dic['pct_correct_notdogs'] = 0.0\n return results_stats_dic\n\n\ndef adjust_results4_isadog(results_dic, dogfile):\n \"\"\"\n Adjusts the results dictionary to determine if classifier correctly \n classified images 'as a dog' or 'not a dog' especially when not a match. \n Demonstrates if model architecture correctly classifies dog images even if\n it gets dog breed wrong (not a match).\n Parameters:\n results_dic - Dictionary with 'key' as image filename and 'value' as a \n List. 
Where the list will contain the following items: \n index 0 = pet image label (string)\n index 1 = classifier label (string)\n index 2 = 1/0 (int) where 1 = match between pet image\n and classifer labels and 0 = no match between labels\n ------ where index 3 & index 4 are added by this function -----\n NEW - index 3 = 1/0 (int) where 1 = pet image 'is-a' dog and \n 0 = pet Image 'is-NOT-a' dog. \n NEW - index 4 = 1/0 (int) where 1 = Classifier classifies image \n 'as-a' dog and 0 = Classifier classifies image \n 'as-NOT-a' dog.\n dogfile - A text file that contains names of all dogs from the classifier\n function and dog names from the pet image files. This file has \n one dog name per line dog names are all in lowercase with \n spaces separating the distinct words of the dog name. Dog names\n from the classifier function can be a string of dog names separated\n by commas when a particular breed of dog has multiple dog names \n associated with that breed (ex. maltese dog, maltese terrier, \n maltese) (string - indicates text file's filename)\n Returns:\n None - results_dic is mutable data type so no return needed.\n \"\"\"\n dognames_dic = dict()\n with open(dogfile, 'r') as infile:\n line = infile.readline()\n while line != '':\n line = line.strip('\\n')\n if line not in dognames_dic:\n dognames_dic[line] = 1\n line = infile.readline()\n for key in results_dic:\n if results_dic[key][0] in dognames_dic:\n if results_dic[key][1] in dognames_dic:\n results_dic[key].extend((1, 1))\n else:\n results_dic[key].extend((1, 0))\n elif results_dic[key][1] in dognames_dic:\n results_dic[key].extend((0, 1))\n else:\n results_dic[key].extend((0, 0))\n\n\ndef classify_images(images_dir, results_dic, model):\n \"\"\"\n Creates classifier labels with classifier function, compares pet labels to \n the classifier labels, and adds the classifier label and the comparison of \n the labels to the results dictionary using the extend function. Be sure to\n format the classifier labels so that they will match your pet image labels.\n The format will include putting the classifier labels in all lower case \n letters and strip the leading and trailing whitespace characters from them.\n For example, the Classifier function returns = 'Maltese dog, Maltese terrier, Maltese' \n so the classifier label = 'maltese dog, maltese terrier, maltese'.\n Recall that dog names from the classifier function can be a string of dog \n names separated by commas when a particular breed of dog has multiple dog \n names associated with that breed. For example, you will find pet images of\n a 'dalmatian'(pet label) and it will match to the classifier label \n 'dalmatian, coach dog, carriage dog' if the classifier function correctly \n classified the pet images of dalmatians.\n PLEASE NOTE: This function uses the classifier() function defined in \n classifier.py within this function. The proper use of this function is\n in test_classifier.py Please refer to this program prior to using the \n classifier() function to classify images within this function \n Parameters: \n images_dir - The (full) path to the folder of images that are to be\n classified by the classifier function (string)\n results_dic - Results Dictionary with 'key' as image filename and 'value'\n as a List. 
Where the list will contain the following items: \n index 0 = pet image label (string)\n --- where index 1 & index 2 are added by this function ---\n NEW - index 1 = classifier label (string)\n NEW - index 2 = 1/0 (int) where 1 = match between pet image\n and classifer labels and 0 = no match between labels\n model - Indicates which CNN model architecture will be used by the \n classifier function to classify the pet images,\n values must be either: resnet alexnet vgg (string)\n Returns:\n None - results_dic is mutable data type so no return needed. \n \"\"\"\n first_filename_list = listdir('pet_images/')\n filename_list = []\n for idx in range(0, len(first_filename_list), 1):\n if not first_filename_list[idx].startswith('.'):\n filename_list.append(first_filename_list[idx])\n idx = 0\n for key in results_dic:\n value = results_dic[key]\n path = images_dir + filename_list[idx]\n model_label = classifier(path, model)\n model_label = model_label.lower()\n model_label = model_label.strip()\n truth = 0\n if value in model_label:\n truth = 1\n results_dic[key] = [value, model_label, truth]\n idx = idx + 1\n\n\ndef get_pet_label(pet_image):\n low_pet_image = pet_image.lower()\n word_list_pet_image = low_pet_image.split('_')\n pet_name = ''\n for word in word_list_pet_image:\n if word.isalpha():\n pet_name += word + ' '\n pet_name = pet_name.strip()\n return pet_name\n\n\ndef print_dict(dict):\n for item in dict.items():\n print(item)\n\n\ndef main():\n in_arg = get_input_args()\n first_filename_list = listdir('pet_images/')\n filename_list = []\n for idx in range(0, len(first_filename_list), 1):\n if not first_filename_list[idx].startswith('.'):\n filename_list.append(first_filename_list[idx])\n results_dic = dict()\n for idx in range(0, len(filename_list), 1):\n if filename_list[idx] not in results_dic:\n results_dic[filename_list[idx]] = get_pet_label(filename_list[idx])\n classify_images(in_arg.dir, results_dic, in_arg.arch)\n adjust_results4_isadog(results_dic, in_arg.dogfile)\n results_dic_output = calculates_results_stats(results_dic)\n print_dict(results_dic_output)\n\n\nmain()\n",
"step-5": "from classifier import classifier \nfrom get_input_args import get_input_args\nfrom os import listdir\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# */AIPND-revision/intropyproject-classify-pet-images/calculates_results_stats_hints.py\n# \n# PROGRAMMER:\n# DATE CREATED: \n# REVISED DATE: \n# PURPOSE: This is a *hints* file to help guide students in creating the \n# function calculates_results_stats that calculates the statistics\n# of the results of the programrun using the classifier's model \n# architecture to classify the images. This function will use the \n# results in the results dictionary to calculate these statistics. \n# This function will then put the results statistics in a dictionary\n# (results_stats_dic) that's created and returned by this function.\n# This will allow the user of the program to determine the 'best' \n# model for classifying the images. The statistics that are calculated\n# will be counts and percentages. Please see \"Intro to Python - Project\n# classifying Images - xx Calculating Results\" for details on the \n# how to calculate the counts and percentages for this function. \n# This function inputs:\n# - The results dictionary as results_dic within calculates_results_stats \n# function and results for the function call within main.\n# This function creates and returns the Results Statistics Dictionary -\n# results_stats_dic. This dictionary contains the results statistics \n# (either a percentage or a count) where the key is the statistic's \n# name (starting with 'pct' for percentage or 'n' for count) and value \n# is the statistic's value. This dictionary should contain the \n# following keys:\n# n_images - number of images\n# n_dogs_img - number of dog images\n# n_notdogs_img - number of NON-dog images\n# n_match - number of matches between pet & classifier labels\n# n_correct_dogs - number of correctly classified dog images\n# n_correct_notdogs - number of correctly classified NON-dog images\n# n_correct_breed - number of correctly classified dog breeds\n# pct_match - percentage of correct matches\n# pct_correct_dogs - percentage of correctly classified dogs\n# pct_correct_breed - percentage of correctly classified dog breeds\n# pct_correct_notdogs - percentage of correctly classified NON-dogs\n#\n##\n# TODO 5: EDIT and ADD code BELOW to do the following that's stated in the \n# comments below that start with \"TODO: 5\" for the calculates_results_stats \n# function. Please be certain to replace None in the return statement with\n# the results_stats_dic dictionary that you create with this function\n# \ndef calculates_results_stats(results_dic):\n \"\"\"\n Calculates statistics of the results of the program run using classifier's model \n architecture to classifying pet images. Then puts the results statistics in a \n dictionary (results_stats_dic) so that it's returned for printing as to help\n the user to determine the 'best' model for classifying images. Note that \n the statistics calculated as the results are either percentages or counts.\n Parameters:\n results_dic - Dictionary with key as image filename and value as a List \n (index)idx 0 = pet image label (string)\n idx 1 = classifier label (string)\n idx 2 = 1/0 (int) where 1 = match between pet image and \n classifer labels and 0 = no match between labels\n idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and \n 0 = pet Image 'is-NOT-a' dog. 
\n idx 4 = 1/0 (int) where 1 = Classifier classifies image \n 'as-a' dog and 0 = Classifier classifies image \n 'as-NOT-a' dog.\n Returns:\n results_stats_dic - Dictionary that contains the results statistics (either\n a percentage or a count) where the key is the statistic's \n name (starting with 'pct' for percentage or 'n' for count)\n and the value is the statistic's value. See comments above\n and the classroom Item XX Calculating Results for details\n on how to calculate the counts and statistics.\n \"\"\" \n # Creates empty dictionary for results_stats_dic\n results_stats_dic = dict()\n \n # Sets all counters to initial values of zero so that they can \n # be incremented while processing through the images in results_dic \n results_stats_dic['n_dogs_img'] = 0\n results_stats_dic['n_match'] = 0\n results_stats_dic['n_correct_dogs'] = 0\n results_stats_dic['n_correct_notdogs'] = 0\n results_stats_dic['n_correct_breed'] = 0\n \n # process through the results dictionary\n for key in results_dic:\n \n # Labels Match Exactly\n if results_dic[key][2] == 1:\n results_stats_dic['n_match'] += 1\n\n # TODO: 5a. REPLACE pass with CODE that counts how many pet images of\n # dogs had their breed correctly classified. This happens \n # when the pet image label indicates the image is-a-dog AND \n # the pet image label and the classifier label match. You \n # will need to write a conditional statement that determines\n # when the dog breed is correctly classified and then \n # increments 'n_correct_breed' by 1. Recall 'n_correct_breed' \n # is a key in the results_stats_dic dictionary with it's value \n # representing the number of correctly classified dog breeds.\n # \n # Pet Image Label is a Dog AND Labels match- counts Correct Breed\n if results_dic[key][3] == 1 and results_dic[key][2] == 1:\n results_stats_dic['n_correct_breed'] += 1\n \n # Pet Image Label is a Dog - counts number of dog images\n if results_dic[key][3] == 1:\n results_stats_dic['n_dogs_img'] += 1\n \n # Classifier classifies image as Dog (& pet image is a dog)\n # counts number of correct dog classifications\n if results_dic[key][4] == 1:\n results_stats_dic['n_correct_dogs'] += 1\n\n # TODO: 5b. REPLACE pass with CODE that counts how many pet images \n # that are NOT dogs were correctly classified. This happens \n # when the pet image label indicates the image is-NOT-a-dog \n # AND the classifier label indicates the images is-NOT-a-dog.\n # You will need to write a conditional statement that \n # determines when the classifier label indicates the image \n # is-NOT-a-dog and then increments 'n_correct_notdogs' by 1. 
\n # Recall the 'else:' above 'pass' already indicates that the \n # pet image label indicates the image is-NOT-a-dog and \n # 'n_correct_notdogs' is a key in the results_stats_dic dictionary \n # with it's value representing the number of correctly \n # classified NOT-a-dog images.\n # \n # Pet Image Label is NOT a Dog\n else:\n # Classifier classifies image as NOT a Dog(& pet image isn't a dog)\n # counts number of correct NOT dog clasifications.\n if results_dic[key][3] == 0 and results_dic[key][4] == 0:\n results_stats_dic['n_correct_notdogs'] += 1\n\n\n # Calculates run statistics (counts & percentages) below that are calculated\n # using the counters from above.\n\n # calculates number of total images\n results_stats_dic['n_images'] = len(results_dic)\n\n # calculates number of not-a-dog images using - images & dog images counts\n results_stats_dic['n_notdogs_img'] = (results_stats_dic['n_images'] - \n results_stats_dic['n_dogs_img']) \n\n # TODO: 5c. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # matched images. Recall that this can be calculated by the\n # number of correctly matched images ('n_match') divided by the \n # number of images('n_images'). This result will need to be \n # multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct for matches\n results_stats_dic['pct_match'] = (results_stats_dic['n_match'] / results_stats_dic['n_images']) * 100\n\n # TODO: 5d. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # classified dog images. Recall that this can be calculated by \n # the number of correctly classified dog images('n_correct_dogs')\n # divided by the number of dog images('n_dogs_img'). This result \n # will need to be multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct dogs\n results_stats_dic['pct_correct_dogs'] = (results_stats_dic['n_correct_dogs'] / results_stats_dic['n_dogs_img']) * 100\n\n # TODO: 5e. REPLACE zero(0.0) with CODE that calculates the % of correctly\n # classified breeds of dogs. Recall that this can be calculated \n # by the number of correctly classified breeds of dog('n_correct_breed') \n # divided by the number of dog images('n_dogs_img'). This result \n # will need to be multiplied by 100.0 to provide the percentage.\n # \n # Calculates % correct breed of dog\n results_stats_dic['pct_correct_breed'] = (results_stats_dic['n_correct_breed'] / results_stats_dic['n_dogs_img']) * 100\n\n # Calculates % correct not-a-dog images\n # Uses conditional statement for when no 'not a dog' images were submitted \n if results_stats_dic['n_notdogs_img'] > 0:\n results_stats_dic['pct_correct_notdogs'] = (results_stats_dic['n_correct_notdogs'] /\n results_stats_dic['n_notdogs_img']) * 100.0\n else:\n results_stats_dic['pct_correct_notdogs'] = 0.0\n\n \n # TODO 5f. REPLACE None with the results_stats_dic dictionary that you \n # created with this function \n return results_stats_dic\n\n#----------------------------------------------------------------------------------------------------\n# METHODS FROM OTHER LESSONS\n#----------------------------------------------------------------------------------------------------\n\ndef adjust_results4_isadog(results_dic, dogfile):\n \"\"\"\n Adjusts the results dictionary to determine if classifier correctly \n classified images 'as a dog' or 'not a dog' especially when not a match. 
\n Demonstrates if model architecture correctly classifies dog images even if\n it gets dog breed wrong (not a match).\n Parameters:\n results_dic - Dictionary with 'key' as image filename and 'value' as a \n List. Where the list will contain the following items: \n index 0 = pet image label (string)\n index 1 = classifier label (string)\n index 2 = 1/0 (int) where 1 = match between pet image\n and classifer labels and 0 = no match between labels\n ------ where index 3 & index 4 are added by this function -----\n NEW - index 3 = 1/0 (int) where 1 = pet image 'is-a' dog and \n 0 = pet Image 'is-NOT-a' dog. \n NEW - index 4 = 1/0 (int) where 1 = Classifier classifies image \n 'as-a' dog and 0 = Classifier classifies image \n 'as-NOT-a' dog.\n dogfile - A text file that contains names of all dogs from the classifier\n function and dog names from the pet image files. This file has \n one dog name per line dog names are all in lowercase with \n spaces separating the distinct words of the dog name. Dog names\n from the classifier function can be a string of dog names separated\n by commas when a particular breed of dog has multiple dog names \n associated with that breed (ex. maltese dog, maltese terrier, \n maltese) (string - indicates text file's filename)\n Returns:\n None - results_dic is mutable data type so no return needed.\n \"\"\" \n # Creates dognames dictionary for quick matching to results_dic labels from\n # real answer & classifier's answer\n dognames_dic = dict()\n\n # Reads in dognames from file, 1 name per line & automatically closes file\n with open(dogfile, \"r\") as infile:\n # Reads in dognames from first line in file\n line = infile.readline()\n\n # Processes each line in file until reaching EOF (end-of-file) by \n # processing line and adding dognames to dognames_dic with while loop\n while line != \"\":\n # print(\"----- line: {}\".format(line))\n\n # TODO: 4a. REPLACE pass with CODE to remove the newline character\n # from the variable line \n #\n # Process line by striping newline from line\n line = line.strip('\\n')\n\n # TODO: 4b. REPLACE pass with CODE to check if the dogname(line) \n # exists within dognames_dic, then if the dogname(line) \n # doesn't exist within dognames_dic then add the dogname(line) \n # to dognames_dic as the 'key' with the 'value' of 1. \n #\n # adds dogname(line) to dogsnames_dic if it doesn't already exist \n # in the dogsnames_dic dictionary\n if line not in dognames_dic:\n dognames_dic[line] = 1\n # print(\"----- dognames_dic[{}]: {}\".format(line, dognames_dic[line]))\n\n # Reads in next line in file to be processed with while loop\n # if this line isn't empty (EOF)\n line = infile.readline()\n\n \n # Add to whether pet labels & classifier labels are dogs by appending\n # two items to end of value(List) in results_dic. \n # List Index 3 = whether(1) or not(0) Pet Image Label is a dog AND \n # List Index 4 = whether(1) or not(0) Classifier Label is a dog\n # How - iterate through results_dic if labels are found in dognames_dic\n # then label \"is a dog\" index3/4=1 otherwise index3/4=0 \"not a dog\"\n for key in results_dic:\n\n # Pet Image Label IS of Dog (e.g. found in dognames_dic)\n if results_dic[key][0] in dognames_dic:\n \n # Classifier Label IS image of Dog (e.g. found in dognames_dic)\n # appends (1, 1) because both labels are dogs\n if results_dic[key][1] in dognames_dic:\n results_dic[key].extend((1, 1))\n # ('cat_01.jpg', ['cat', 'lynx', 0])\n # ('Poodle_07927.jpg', ['poodle', 'standard poodle, poodle', 1])\n\n # TODO: 4c. 
REPLACE pass BELOW with CODE that adds the following to\n # results_dic dictionary for the key indicated by the \n # variable key - append (1,0) to the value using \n # the extend list function. This indicates\n # the pet label is-a-dog, classifier label is-NOT-a-dog. \n # \n # Classifier Label IS NOT image of dog (e.g. NOT in dognames_dic)\n # appends (1,0) because only pet label is a dog\n else:\n results_dic[key].extend((1, 0))\n\n # Pet Image Label IS NOT a Dog image (e.g. NOT found in dognames_dic)\n else:\n # TODO: 4d. REPLACE pass BELOW with CODE that adds the following to\n # results_dic dictionary for the key indicated by the \n # variable key - append (0,1) to the value uisng\n # the extend list function. This indicates\n # the pet label is-NOT-a-dog, classifier label is-a-dog. \n # \n # Classifier Label IS image of Dog (e.g. found in dognames_dic)\n # appends (0, 1)because only Classifier labe is a dog\n if results_dic[key][1] in dognames_dic:\n results_dic[key].extend((0, 1))\n\n # TODO: 4e. REPLACE pass BELOW with CODE that adds the following to\n # results_dic dictionary for the key indicated by the \n # variable key - append (0,0) to the value using the \n # extend list function. This indicates\n # the pet label is-NOT-a-dog, classifier label is-NOT-a-dog. \n # \n # Classifier Label IS NOT image of Dog (e.g. NOT in dognames_dic)\n # appends (0, 0) because both labels aren't dogs\n else:\n results_dic[key].extend((0, 0))\n\ndef classify_images(images_dir, results_dic, model):\n \"\"\"\n Creates classifier labels with classifier function, compares pet labels to \n the classifier labels, and adds the classifier label and the comparison of \n the labels to the results dictionary using the extend function. Be sure to\n format the classifier labels so that they will match your pet image labels.\n The format will include putting the classifier labels in all lower case \n letters and strip the leading and trailing whitespace characters from them.\n For example, the Classifier function returns = 'Maltese dog, Maltese terrier, Maltese' \n so the classifier label = 'maltese dog, maltese terrier, maltese'.\n Recall that dog names from the classifier function can be a string of dog \n names separated by commas when a particular breed of dog has multiple dog \n names associated with that breed. For example, you will find pet images of\n a 'dalmatian'(pet label) and it will match to the classifier label \n 'dalmatian, coach dog, carriage dog' if the classifier function correctly \n classified the pet images of dalmatians.\n PLEASE NOTE: This function uses the classifier() function defined in \n classifier.py within this function. The proper use of this function is\n in test_classifier.py Please refer to this program prior to using the \n classifier() function to classify images within this function \n Parameters: \n images_dir - The (full) path to the folder of images that are to be\n classified by the classifier function (string)\n results_dic - Results Dictionary with 'key' as image filename and 'value'\n as a List. 
Where the list will contain the following items: \n index 0 = pet image label (string)\n --- where index 1 & index 2 are added by this function ---\n NEW - index 1 = classifier label (string)\n NEW - index 2 = 1/0 (int) where 1 = match between pet image\n and classifer labels and 0 = no match between labels\n model - Indicates which CNN model architecture will be used by the \n classifier function to classify the pet images,\n values must be either: resnet alexnet vgg (string)\n Returns:\n None - results_dic is mutable data type so no return needed. \n \"\"\"\n \n # None \n\n first_filename_list = listdir(\"pet_images/\")\n filename_list = []\n for idx in range(0, len(first_filename_list), 1):\n if not first_filename_list[idx].startswith('.'):\n filename_list.append(first_filename_list[idx])\n\n idx = 0\n for key in results_dic:\n # print(\"---------------\")\n\n value=results_dic[key]\n # print(\"\\t-----key={}\".format(key))\n # print(\"\\t-----value={}\".format(value))\n \n path = images_dir + filename_list[idx]\n # print(\"\\t-----path={}\".format(path))\n \n model_label = classifier(path, model)\n model_label = model_label.lower()\n model_label = model_label.strip()\n # print(\"\\t-----model_label={}\".format(model_label))\n \n truth = 0\n if value in model_label:\n truth = 1\n\n results_dic[key] = [ value, model_label, truth ]\n # print(\"\\t-----truth={}\".format(truth))\n idx = idx + 1\n\ndef get_pet_label(pet_image):\n # Sets string to lower case letters\n low_pet_image = pet_image.lower()\n\n # Splits lower case string by _ to break into words \n word_list_pet_image = low_pet_image.split(\"_\")\n\n # Create pet_name starting as empty string\n pet_name = \"\"\n\n # Loops to check if word in pet name is only alphabetic characters - \n # if true append word to pet_name separated by trailing space \n for word in word_list_pet_image:\n if word.isalpha():\n pet_name += word + \" \"\n\n # Strip off starting/trailing whitespace characters \n pet_name = pet_name.strip()\n\n # Returns resulting pet_name\n return pet_name\n\ndef print_dict(dict):\n for item in dict.items():\n print(item)\n\ndef main():\n in_arg = get_input_args()\n first_filename_list = listdir(\"pet_images/\")\n filename_list = []\n for idx in range(0, len(first_filename_list), 1):\n if not first_filename_list[idx].startswith('.'):\n filename_list.append(first_filename_list[idx])\n\n results_dic = dict()\n for idx in range(0, len(filename_list), 1):\n if filename_list[idx] not in results_dic:\n results_dic[filename_list[idx]] = get_pet_label(filename_list[idx])\n classify_images(in_arg.dir, results_dic, in_arg.arch)\n adjust_results4_isadog(results_dic, in_arg.dogfile)\n results_dic_output = calculates_results_stats(results_dic)\n print_dict(results_dic_output)\n\n#----------------------------------------------------------------------------------------------------\n\nmain()",
"step-ids": [
3,
4,
7,
8,
9
]
}
|
[
3,
4,
7,
8,
9
] |
# Should print 516
def final_frequency():
frequency = 0
with open('input') as f:
for line in f:
frequency += int(line)
return frequency
print(final_frequency())
|
normal
|
{
"blob_id": "4d68b663933070cb287689b70d6ded07958cef22",
"index": 3047,
"step-1": "<mask token>\n",
"step-2": "def final_frequency():\n frequency = 0\n with open('input') as f:\n for line in f:\n frequency += int(line)\n return frequency\n\n\n<mask token>\n",
"step-3": "def final_frequency():\n frequency = 0\n with open('input') as f:\n for line in f:\n frequency += int(line)\n return frequency\n\n\nprint(final_frequency())\n",
"step-4": "# Should print 516\ndef final_frequency():\n frequency = 0\n\n with open('input') as f:\n for line in f:\n frequency += int(line)\n\n return frequency\n\n\nprint(final_frequency())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
# coding=utf-8
import re
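# Reads aaa.txt, extracts the two arguments of each open('...', '...') call with
# a regex, and prints the corresponding query-detail URL for each pair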
str1 = 'http://www.chinapesticide.org.cn/myquery/querydetail?pdno='
str2 = '&pdrgno='
f = open('aaa.txt', 'r')
source = f.read()
rr = re.compile(r'open[(\'](.*)[\']')
s=rr.findall(source)
for line in s:
temps = line.split(',')
a = temps[0]
b = temps[1]
print str1 + a.replace('\'', '').strip() + str2 + b.replace('\'','').strip()
f.close()
|
normal
|
{
"blob_id": "387c48fcf00480a820fb407f5bad1d9f41b28e7a",
"index": 9160,
"step-1": "#!/usr/bin/python\n# coding=utf-8\n\nimport re\n\nstr1 = 'http://www.chinapesticide.org.cn/myquery/querydetail?pdno='\nstr2 = '&pdrgno='\nf = open('aaa.txt', 'r')\nsource = f.read()\nrr = re.compile(r'open[(\\'](.*)[\\']')\ns=rr.findall(source)\nfor line in s:\n temps = line.split(',')\n a = temps[0]\n b = temps[1]\n print str1 + a.replace('\\'', '').strip() + str2 + b.replace('\\'','').strip()\nf.close()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import datetime
import time
from sys import exit
from matplotlib import colors, pyplot as plt
from functools import reduce
import matplotlib.cm as cm
import seaborn as sns
from astropy.io import ascii, fits
from astropy.wcs import wcs
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.interpolate import interp2d
import matplotlib.mlab as mlab
import scipy, pylab
import rpy2
import cubehelix
import math
from pysextractor import SExtractor
__author__ = 'pnovais'
ini=time.time()
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# defining the function that will read the fits images
def get_image(f_sdss):
img = f_sdss[0].data
# sky = f_sdss[2].data
return img
# opening the file with the names of the images, in the n bands
df_fit = pd.read_csv('data/arquivo_fits.csv')
'''
================================================================================
Running SExtractor on the r-band image, creating a segmentation map and a
catalogue with the detected objects
UPDATE THE NAME OF THE SEGMENTATION BAND
================================================================================
'''
fname = 'data/frame-r-002507-4-0226.fits'
sex = SExtractor()
sex.config['PARAMETERS_LIST'].append('FLUX_ISO')
sex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')
sex.config['PARAMETERS_LIST'].append('MAG_AUTO')
sex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')
sex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')
sex.config['PARAMETERS_LIST'].append('ALPHA_J2000')
sex.config['PARAMETERS_LIST'].append('DELTA_J2000')
sex.config['PARAMETERS_LIST'].append('FWHM_WORLD')
sex.config['PARAMETERS_LIST'].append('CLASS_STAR')
sex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'
sex.run(fname)
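# 'check.fits' is the SEGMENTATION check-image written by the SExtractor run above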
segmap = fits.open('check.fits')[0].data
df_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)
df_cat.columns = ['num','flux_best','fluxerr_best', 'x','y','flags',
'fwhm_image', 'flux_iso','mag_isocor','mag_auto',
'petro_radius','ISO_AREA','ra','dec',
'fwhm_world','class_star']
# selection of the objects that should be galaxies
df_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]
df_cat = df_cat.reset_index()
df_cat = df_cat.ix[:,1:15]
'''
================================================================================
Reading the images, in every band, and building a dataframe for each galaxy
using astropy
Computing the sky in every band
UPDATE THE NAME OF THE SEGMENTATION BAND
================================================================================
'''
df = pd.DataFrame()
df_sky = pd.DataFrame()
for i_object in range(13,14):
window_size = 250
filter_seg = 'rSDSS'
ra = df_cat['ra']
dec = df_cat['dec']
image_r = fits.open('data/frame-r-002507-4-0226.fits')
wcsys = wcs.WCS(header=image_r[0].header)
y, x = wcsys.wcs_world2pix(ra, dec, 1)
interval = (int(round(x[i_object] - window_size / 2)), int(round(x[i_object] + window_size / 2)),
int(round(y[i_object] - window_size / 2)), int(round(y[i_object] + window_size / 2)))
df = pd.DataFrame()
df_sky = pd.DataFrame()
seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]
for i_gal in range(len(df_fit)):
f_sdss = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],
df_fit['name'][i_gal]))
img = get_image(f_sdss)
img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]
plt.figure(1)
plt.clf()
plt.imshow(100*np.log10(img_cut/255), cmap='spectral')
plt.colorbar()
band=df_fit['filter'][i_gal]
nrows, ncols = img_cut.shape
xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )
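        # flatten the cutout into one (x, y, flux) row per pixel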
table = np.column_stack(( xx.flatten(), yy.flatten(), img_cut.flatten() ))
temp = pd.DataFrame(table, columns=['x','y',band])
df = pd.concat([df,temp], axis=1)
sky_r = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],
df_fit['name'][i_gal]))
sky = get_image(sky_r)
wcsys = wcs.WCS(header=sky_r[0].header)
yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)
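        # fixed empty-sky patch centred at RA=351.101, Dec=14.737636 (deg), used to estimate the background in each band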
delta_x = 85
delta_y = 85
interval_sky = (int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)),
int(round(yc + delta_y / 2)))
img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:interval_sky[3]]
sky_nrows, sky_ncols = img_sky.shape
xxc, yyc = np.meshgrid( *np.ogrid[:sky_ncols, :sky_nrows] )
table_sky = np.column_stack(( xxc.flatten(), yyc.flatten(), img_sky.flatten() ))
temp_sky = pd.DataFrame(table_sky, columns=['x','y',band])
df_sky = pd.concat([df_sky,temp_sky], axis=1)
df = df.ix[:, [0,1,2,5,8,11,14]]
df_sky = df_sky.ix[:, [0,1,2,5,8,11,14]]
    '''
    Image of the galaxy, in the r band.
    '''
plt.figure(1)
plt.clf()
r_sdss = fits.open('data/frame-r-%s' %(df_fit['name'][i_gal]))
img_r = get_image(r_sdss)
img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]
cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)
imgplot = plt.imshow(100*np.log10(img_cut_r/255), cmap='spectral')
titulo='Galaxy #%s - banda r' %(df_cat['num'][i_object])
plt.title(titulo)
plt.colorbar()
figura = 'figures/galaxy_#%s' %df_cat['num'][i_object]
plt.savefig(figura)
    '''
    Segmented image of the galaxy, in the r band.
    '''
plt.figure(1)
plt.clf()
cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)
imgplot = plt.imshow(seg_sex, cmap='spectral')
titulo='Segmentation Galaxy #%s - banda r' %(df_cat['num'][i_object])
plt.title(titulo)
plt.colorbar()
figura = 'figures/seg_galaxy_#%s' %df_cat['num'][i_object]
plt.savefig(figura)
    '''
    ================================================================================
    Saving the fluxes of each galaxy to a txt file
    ================================================================================
    '''
saida_fluxes = 'data/all_band_fluxes_%s.txt' %df_cat['num'][i_object]
formats=['%d','%d','%5.4f','%5.4f','%5.4f','%5.4f','%5.4f']
headers2='x\ty\tu\tg\tr\ti\tz'
np.savetxt(saida_fluxes,df, delimiter='\t',header=headers2, fmt = formats)
print('')
print('>> Os dados estao em: "%s".' %saida_fluxes)
    '''
    ================================================================================
    Subtracting the sky, in the r band
    ================================================================================
    '''
df_aux=df.ix[:,2:]
df_aux1=df.ix[:,:2]
df_sky_aux = df_sky.ix[:,2:]
df_aux3 = (df_aux - df_sky_aux.mean())
df_rss=df_aux1.join(df_aux3)
"""
A segmentacao consiste de usar um limiar para separar o objeto do fundo.
No nosso caso, usamos limiar = alpha*std_ceu
"""
    '''
    ================================================================================
    SEGMENTATION
    ================================================================================
    '''
    # SELECTION OF THE PIXELS ABOVE THE THRESHOLD
limiar = 2.5*df_sky.r.std()
df_seg = df_rss.ix[df_rss['r'] > limiar]
print('Pixeis acima do limiar: %d' %len(df_seg))
np.savetxt('fof2.txt',df_seg,delimiter='\t')
fim = time.time()
time_proc = fim - ini
print('')
print(bcolors.HEADER + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)
|
normal
|
{
"blob_id": "736fee6f9a46b8568b2dd217b81d54d689306630",
"index": 970,
"step-1": "<mask token>\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\n<mask token>\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\n<mask token>\nsex.run(fname)\n<mask token>\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot 
= plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' % saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\n<mask token>\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n",
"step-3": "<mask token>\n__author__ = 'pnovais'\nini = time.time()\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n<mask token>\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags',\n 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius',\n 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star']\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:, 1:15]\n<mask token>\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n 
Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' % saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport datetime\nimport time\nfrom sys import exit\nfrom matplotlib import colors, pyplot as plt\nfrom functools import reduce\nimport matplotlib.cm as cm\nimport seaborn as sns\nfrom astropy.io import ascii, fits\nfrom astropy.wcs import wcs\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy.interpolate import interp2d\nimport matplotlib.mlab as mlab\nimport scipy, pylab\nimport rpy2\nimport cubehelix\nimport math\nfrom pysextractor import SExtractor\n__author__ = 'pnovais'\nini = time.time()\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n<mask token>\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags',\n 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius',\n 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star']\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:, 1:15]\n<mask token>\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), 
int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' % saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n",
"step-5": "\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport time\nfrom sys import exit\nfrom matplotlib import colors, pyplot as plt\nfrom functools import reduce\nimport matplotlib.cm as cm\nimport seaborn as sns\nfrom astropy.io import ascii, fits\nfrom astropy.wcs import wcs\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy.interpolate import interp2d\nimport matplotlib.mlab as mlab\nimport scipy, pylab\nimport rpy2\nimport cubehelix\nimport math\nfrom pysextractor import SExtractor\n\n__author__ = 'pnovais'\n\nini=time.time()\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n#definindo a classe que ira ler as imagens fits\ndef get_image(f_sdss):\n img = f_sdss[0].data\n# sky = f_sdss[2].data\n return img\n\n#abertura do arquivo com o nome das imagens, nas n bandas\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n\n'''\n================================================================================\nRodando o SExtractor na imagem na banda r, criando uma segmentacao e um catalogo\ncom os objetos obtidos\nATUALIZAR NOME DA BANDA DE SEGMENTACAO\n================================================================================\n'''\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\n\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num','flux_best','fluxerr_best', 'x','y','flags',\n 'fwhm_image', 'flux_iso','mag_isocor','mag_auto',\n 'petro_radius','ISO_AREA','ra','dec',\n 'fwhm_world','class_star']\n\n#selecao dos objetos que devem ser galaxias\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:,1:15]\n\n'''\n================================================================================\nLendo as imagens, em todas as bandas, e gerando um dataframe para cada galaxia\nutilizando astropy\nCalculando o ceu em todas as bandas\n\nATUALIZAR NOME DA BANDA DE SEGMENTACAO\n================================================================================\n'''\n\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\n\n\nfor i_object in range(13,14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = (int(round(x[i_object] - window_size / 2)), int(round(x[i_object] + window_size / 2)),\n int(round(y[i_object] - window_size / 2)), int(round(y[i_object] + window_size / 2)))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n\n for i_gal in range(len(df_fit)):\n f_sdss = 
fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100*np.log10(img_cut/255), cmap='spectral')\n plt.colorbar()\n band=df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )\n table = np.column_stack(( xx.flatten(), yy.flatten(), img_cut.flatten() ))\n temp = pd.DataFrame(table, columns=['x','y',band])\n df = pd.concat([df,temp], axis=1)\n\n sky_r = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = (int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)),\n int(round(yc + delta_y / 2)))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid( *np.ogrid[:sky_ncols, :sky_nrows] )\n table_sky = np.column_stack(( xxc.flatten(), yyc.flatten(), img_sky.flatten() ))\n temp_sky = pd.DataFrame(table_sky, columns=['x','y',band])\n df_sky = pd.concat([df_sky,temp_sky], axis=1)\n\n df = df.ix[:, [0,1,2,5,8,11,14]]\n df_sky = df_sky.ix[:, [0,1,2,5,8,11,14]]\n\n '''\n Imagem da galaxia, na banda r.\n '''\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' %(df_fit['name'][i_gal]))\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)\n imgplot = plt.imshow(100*np.log10(img_cut_r/255), cmap='spectral')\n titulo='Galaxy #%s - banda r' %(df_cat['num'][i_object])\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' %df_cat['num'][i_object]\n plt.savefig(figura)\n '''\n Imagem segmentada da galaxia, na banda r.\n '''\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo='Segmentation Galaxy #%s - banda r' %(df_cat['num'][i_object])\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' %df_cat['num'][i_object]\n plt.savefig(figura)\n\n '''\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n '''\n saida_fluxes = 'data/all_band_fluxes_%s.txt' %df_cat['num'][i_object]\n formats=['%d','%d','%5.4f','%5.4f','%5.4f','%5.4f','%5.4f']\n headers2='x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes,df, delimiter='\\t',header=headers2, fmt = formats)\n print('')\n print('>> Os dados estao em: \"%s\".' 
%saida_fluxes)\n\n '''\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n '''\n df_aux=df.ix[:,2:]\n df_aux1=df.ix[:,:2]\n df_sky_aux = df_sky.ix[:,2:]\n df_aux3 = (df_aux - df_sky_aux.mean())\n df_rss=df_aux1.join(df_aux3)\n\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n '''\n ================================================================================\n SEGMENTACAO\n ================================================================================\n '''\n #SELECAO DOS PIXEIS ACIMA DO LIMIAR\n limiar = 2.5*df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' %len(df_seg))\n np.savetxt('fof2.txt',df_seg,delimiter='\\t')\n\n\n\n\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import scipy.io as sio
import glob
import numpy as np
import matplotlib.pyplot as plt
import math
import os,sys
BIN = os.path.expanduser("../tools/")
sys.path.append(BIN)
import myfilemanager as mfm
import mystyle as ms
import propsort as ps
from functools import partial
from scipy.ndimage import gaussian_filter1d
from scipy.constants import c as clight
plt.close('all')
# Scan Parameters
fraction_device_quad_vect = [0.07, 0.16, 0.26]
n_slices_vect = np.array([250., 500., 750., 1000.])
betax_vect = [50, 100, 150, 200, 300, 400, 500, 600]
# Simulations Parameters
PyPICmode_tag = 'Tblocked'
# If you want to save the figures with all the scan parameters choose: savefigure = True and mode = 'auto'
savefigure = True
mode = 'auto'
#~ # Comment this part if you want to save the plots. You can choose only some scan parameters
#~ savefigure = False
#~ fraction_device_quad_vect = [0.26]
#~ n_slices_vect = np.array([1000.,])
#~ betax_vect = [100]
#~ mode = 'manual'
#~ turn_obs = 350
betay_vect = betax_vect
folder_plot = 'intrabunch_modes/'
if not os.path.exists(folder_plot) and savefigure:
os.makedirs(folder_plot)
# choice of the window of turns
# import the dictionary elements
dic = sio.loadmat('tt_complete.mat')
tt = np.squeeze(dic['tt_first'])
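# mild Gaussian smoothing (sigma = 2 slices) applied to the per-turn signals before plotting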
smooth = partial(gaussian_filter1d, sigma=2, mode='nearest')
n_turns_window = 20
n_sigmaz_sim = 10. #we are simulating 10 long sigmas
i_want_to_count_over = 4.
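# if True, slice centroids are weighted by the number of macroparticles per slice (BPM-like sum signal)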
flag_weighted = True
#Figure parameters
ii_fig = 0
tick_size = 20
axis_font = {'fontname':'Arial', 'size':'24'}
fig_size = (15, 5)
line_width = 3.5
ms.mystyle_arial(16)
# calculate intra-bunch modes
for fraction_device_quad in fraction_device_quad_vect:
kk = np.argmin(np.abs(dic['fraction_device_quad_vect']-fraction_device_quad))
for betax, betay in zip(betax_vect, betay_vect):
jj = np.argmin(np.abs(dic['betax_vect']-betax))
subfolder_plot = folder_plot + 'betaxy_%d_length_%.2f/'%(betax,fraction_device_quad)
if not os.path.exists(subfolder_plot) and savefigure:
os.makedirs(subfolder_plot)
for n_slices in n_slices_vect:
ii = np.argmin(np.abs(dic['n_slices_vect']-n_slices))
if not math.isnan(tt[ii,jj,kk]):
if mode == 'auto':
wind_center = int(tt[ii,jj,kk])
elif mode == 'manual':
wind_center = turn_obs
else:
raise ValueError("I don't understand!?")
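                # build a window of n_turns_window turns centred on the observation turn, clipped at turn 1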
start = [wind_center + n_turns_window/2]
if int(tt[ii,jj,kk]) - n_turns_window/2 < 0:
window_min = 1
window = [np.s_[1:s] for s in start]
else:
window_min = wind_center - n_turns_window/2
window = [np.s_[s-n_turns_window:s] for s in start]
window_max = wind_center + n_turns_window/2
folder_curr_sim = '../simulations_PyPARIS/transverse_grid_%s_betaxy_%.0fm_length%.2f_slices_%d'%(PyPICmode_tag, betax,fraction_device_quad,n_slices)
sim_curr_list = ps.sort_properly(glob.glob(folder_curr_sim+'/slice_evolution_*.h5'))
print sim_curr_list[0]
try:
data = mfm.monitorh5list_to_obj(sim_curr_list, key='Slices', flag_transpose=True)
if flag_weighted:
bpm_x = data.mean_x * data.n_macroparticles_per_slice
bpm_y = data.mean_y * data.n_macroparticles_per_slice
else:
bpm_x = data.mean_x
bpm_y = data.mean_y
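                    # FFT along the slice axis gives the intra-bunch mode spectrum for each turn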
xfft = np.fft.rfft(bpm_x, axis=0)
yfft = np.fft.rfft(bpm_y, axis=0)
xfft = np.abs(xfft)**2 #Power
yfft = np.abs(yfft)**2 #Power
for wd in window:
print wd
n_slices, n_turns = data.mean_z.shape
zz = np.linspace(-2.5e-9*clight/2, 2.5e-9*clight/2, n_slices)
xx, yy = bpm_x, bpm_y
# Setting to plot the fft
xftt_to_plot = np.log10(xfft.T)
yftt_to_plot = np.log10(yfft.T)
minval_x = np.max([xftt_to_plot])-3
minval_y = np.max([yftt_to_plot])-3
xftt_to_plot[xftt_to_plot<minval_x] = minval_x
yftt_to_plot[yftt_to_plot<minval_y] = minval_y
YY_to_plot, XX_to_plot = xftt_to_plot.shape
XX_to_plot = np.arange(XX_to_plot)
YY_to_plot = np.arange(YY_to_plot)
fig, ((ax1, ax2)) = plt.subplots(1, 2, figsize=fig_size)
fig.patch.set_facecolor('w')
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.3)
xmin, xmax = wd.start, wd.stop
col = plt.cm.rainbow_r(np.linspace(0, 1, xmax-xmin))
for i, t in enumerate(range(n_turns)[wd]):
ax1.plot(zz, smooth(bpm_x[:, t]), c=col[i], linewidth=line_width)
ax2.plot(zz, smooth(bpm_y[:, t]), c=col[i], linewidth=line_width)
ax1.set_xlabel('z [m]')
ax2.set_xlabel('z [m]')
ax1.set_title('Turns %.0f - %.0f'%(window_min, window_max))
ax2.set_title('Turns %.0f - %.0f'%(window_min, window_max))
if flag_weighted:
                            ax1.set_xlim(-2.5e-9*clight/2, 2.5e-9*clight/2)
                            ax2.set_xlim(-2.5e-9*clight/2, 2.5e-9*clight/2)
ax1.set_ylabel('Charge weighted\nhorizontal signal')
ax2.set_ylabel('Charge weighted\nvertical signal')
else:
ax1.set_xlim(-0.30, 0.30)
ax2.set_xlim(-0.30, 0.30)
#~ ax1.set_ylim(-.0001,.0001)
#~ ax2.set_ylim(-.0001,.0001)
ax1.set_ylabel('Horizontal signal')
ax2.set_ylabel('Vertical signal')
title = fig.suptitle('Beta_xy = %.0f Fraction Device = %.3f Slices = %.0f\n'%(betax, fraction_device_quad, n_slices))
if flag_weighted and savefigure:
plt.savefig(subfolder_plot + 'charge_weighted_betaxy_%d_length_%.2f_slices_%.0f.png'%(betax, fraction_device_quad, n_slices), dpi=300, bbox_inches='tight')
except IOError as goterror:
print 'Skipped. Got:', goterror
plt.show()
|
normal
|
{
"blob_id": "a4f56b1f93f62d80707367eaba0bba7ef4b2caca",
"index": 4749,
"step-1": "import scipy.io as sio\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nimport os,sys\nBIN = os.path.expanduser(\"../tools/\")\nsys.path.append(BIN)\nimport myfilemanager as mfm\nimport mystyle as ms \nimport propsort as ps\n\nfrom functools import partial\nfrom scipy.ndimage import gaussian_filter1d\n\nfrom scipy.constants import c as clight\n\n\nplt.close('all')\n\n\n# Scan Parameters\nfraction_device_quad_vect = [0.07, 0.16, 0.26]\nn_slices_vect = np.array([250., 500., 750., 1000.])\nbetax_vect = [50, 100, 150, 200, 300, 400, 500, 600]\n\n# Simulations Parameters\nPyPICmode_tag = 'Tblocked'\n\n\n# If you want to save the figures with all the scan parameters choose: savefigures = True and mode = 'auto'\nsavefigure = True\nmode = 'auto'\n#~ # Comment this part if you want to save the plots. You can choose only some scan parameters\n#~ savefigure = False \n#~ fraction_device_quad_vect = [0.26]\n#~ n_slices_vect = np.array([1000.,])\n#~ betax_vect = [100]\n#~ mode = 'manual'\n#~ turn_obs = 350\n\nbetay_vect = betax_vect\nfolder_plot = 'intrabunch_modes/'\nif not os.path.exists(folder_plot) and savefigure:\n os.makedirs(folder_plot)\n\n\n# choice of the window of turns\n# import the dictionary elements\ndic = sio.loadmat('tt_complete.mat')\ntt = np.squeeze(dic['tt_first'])\n\nsmooth = partial(gaussian_filter1d, sigma=2, mode='nearest')\n\nn_turns_window = 20\nn_sigmaz_sim = 10. #we are simulating 10 long sigmas\ni_want_to_count_over = 4.\nflag_weighted = True\n\n\n#Figure parameters\nii_fig = 0\ntick_size = 20\naxis_font = {'fontname':'Arial', 'size':'24'}\nfig_size = (15, 5)\nline_width = 3.5\n\nms.mystyle_arial(16)\n\n\n# calculate intra-bunch modes\nfor fraction_device_quad in fraction_device_quad_vect:\n \n kk = np.argmin(np.abs(dic['fraction_device_quad_vect']-fraction_device_quad))\n for betax, betay in zip(betax_vect, betay_vect):\n jj = np.argmin(np.abs(dic['betax_vect']-betax)) \n subfolder_plot = folder_plot + 'betaxy_%d_length_%.2f/'%(betax,fraction_device_quad)\n if not os.path.exists(subfolder_plot) and savefigure:\n os.makedirs(subfolder_plot)\n \n for n_slices in n_slices_vect:\n ii = np.argmin(np.abs(dic['n_slices_vect']-n_slices)) \n if not math.isnan(tt[ii,jj,kk]):\n\n if mode == 'auto':\n wind_center = int(tt[ii,jj,kk])\n elif mode == 'manual':\n wind_center = turn_obs\n else:\n raise ValueError(\"I don't understand!?\")\n\n start = [wind_center + n_turns_window/2]\n \n if int(tt[ii,jj,kk]) - n_turns_window/2 < 0:\n window_min = 1\n window = [np.s_[1:s] for s in start]\n else:\n window_min = wind_center - n_turns_window/2\n window = [np.s_[s-n_turns_window:s] for s in start]\n \n window_max = wind_center + n_turns_window/2\n \n folder_curr_sim = '../simulations_PyPARIS/transverse_grid_%s_betaxy_%.0fm_length%.2f_slices_%d'%(PyPICmode_tag, betax,fraction_device_quad,n_slices) \n \n sim_curr_list = ps.sort_properly(glob.glob(folder_curr_sim+'/slice_evolution_*.h5'))\n\n \n print sim_curr_list[0]\n\n try:\n data = mfm.monitorh5list_to_obj(sim_curr_list, key='Slices', flag_transpose=True)\n\n if flag_weighted:\n bpm_x = data.mean_x * data.n_macroparticles_per_slice\n bpm_y = data.mean_y * data.n_macroparticles_per_slice\n else:\n bpm_x = data.mean_x \n bpm_y = data.mean_y \n\n xfft = np.fft.rfft(bpm_x, axis=0)\n yfft = np.fft.rfft(bpm_y, axis=0)\n xfft = np.abs(xfft)**2 #Power\n yfft = np.abs(yfft)**2 #Power\n\n\n for wd in window:\n print wd\n\n n_slices, n_turns = data.mean_z.shape\n zz = np.linspace(-2.5e-9*clight/2, 
2.5e-9*clight/2, n_slices)\n xx, yy = bpm_x, bpm_y \n\n # Setting to plot the fft\n xftt_to_plot = np.log10(xfft.T)\n yftt_to_plot = np.log10(yfft.T)\n minval_x = np.max([xftt_to_plot])-3\n minval_y = np.max([yftt_to_plot])-3\n xftt_to_plot[xftt_to_plot<minval_x] = minval_x\n yftt_to_plot[yftt_to_plot<minval_y] = minval_y\n \n YY_to_plot, XX_to_plot = xftt_to_plot.shape\n XX_to_plot = np.arange(XX_to_plot)\n YY_to_plot = np.arange(YY_to_plot)\n\n fig, ((ax1, ax2)) = plt.subplots(1, 2, figsize=fig_size)\n fig.patch.set_facecolor('w')\n fig.subplots_adjust(left=0.05, right=0.95, wspace=0.3)\n \n xmin, xmax = wd.start, wd.stop\n col = plt.cm.rainbow_r(np.linspace(0, 1, xmax-xmin))\n for i, t in enumerate(range(n_turns)[wd]):\n ax1.plot(zz, smooth(bpm_x[:, t]), c=col[i], linewidth=line_width)\n ax2.plot(zz, smooth(bpm_y[:, t]), c=col[i], linewidth=line_width)\n\n\n ax1.set_xlabel('z [m]')\n ax2.set_xlabel('z [m]')\n\n ax1.set_title('Turns %.0f - %.0f'%(window_min, window_max))\n ax2.set_title('Turns %.0f - %.0f'%(window_min, window_max))\n\n if flag_weighted:\n ax1.set_xlim(-2.5e-9*c/2, 2.5e-9*c/2)\n ax2.set_xlim(-2.5e-9*c/2, 2.5e-9*c/2)\n ax1.set_ylabel('Charge weighted\\nhorizontal signal')\n ax2.set_ylabel('Charge weighted\\nvertical signal')\n\n else:\n ax1.set_xlim(-0.30, 0.30)\n ax2.set_xlim(-0.30, 0.30)\n #~ ax1.set_ylim(-.0001,.0001)\n #~ ax2.set_ylim(-.0001,.0001)\n ax1.set_ylabel('Horizontal signal')\n ax2.set_ylabel('Vertical signal')\n\n title = fig.suptitle('Beta_xy = %.0f Fraction Device = %.3f Slices = %.0f\\n'%(betax, fraction_device_quad, n_slices))\n\n if flag_weighted and savefigure:\n plt.savefig(subfolder_plot + 'charge_weighted_betaxy_%d_length_%.2f_slices_%.0f.png'%(betax, fraction_device_quad, n_slices), dpi=300, bbox_inches='tight')\n \n except IOError as goterror:\n print 'Skipped. Got:', goterror\n \nplt.show()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generated by Django 3.1.6 on 2021-02-05 00:27
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='tea',
name='caffeineLvl',
field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='tea',
name='quantPerBox',
field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='tea',
name='quantity',
field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)]),
),
]
|
normal
|
{
"blob_id": "db920f4aadfb53bb26c5ba1fb182f12b95e14a2f",
"index": 7899,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main_app', '0001_initial')]\n operations = [migrations.AlterField(model_name='tea', name=\n 'caffeineLvl', field=models.PositiveIntegerField(default=1,\n validators=[django.core.validators.MaxValueValidator(5), django.\n core.validators.MinValueValidator(1)])), migrations.AlterField(\n model_name='tea', name='quantPerBox', field=models.\n PositiveIntegerField(default=1, validators=[django.core.validators.\n MaxValueValidator(100), django.core.validators.MinValueValidator(1)\n ])), migrations.AlterField(model_name='tea', name='quantity', field\n =models.PositiveIntegerField(default=1, validators=[django.core.\n validators.MaxValueValidator(100), django.core.validators.\n MinValueValidator(1)]))]\n",
"step-4": "import django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('main_app', '0001_initial')]\n operations = [migrations.AlterField(model_name='tea', name=\n 'caffeineLvl', field=models.PositiveIntegerField(default=1,\n validators=[django.core.validators.MaxValueValidator(5), django.\n core.validators.MinValueValidator(1)])), migrations.AlterField(\n model_name='tea', name='quantPerBox', field=models.\n PositiveIntegerField(default=1, validators=[django.core.validators.\n MaxValueValidator(100), django.core.validators.MinValueValidator(1)\n ])), migrations.AlterField(model_name='tea', name='quantity', field\n =models.PositiveIntegerField(default=1, validators=[django.core.\n validators.MaxValueValidator(100), django.core.validators.\n MinValueValidator(1)]))]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-02-05 00:27\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main_app', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='tea',\n name='caffeineLvl',\n field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(1)]),\n ),\n migrations.AlterField(\n model_name='tea',\n name='quantPerBox',\n field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)]),\n ),\n migrations.AlterField(\n model_name='tea',\n name='quantity',\n field=models.PositiveIntegerField(default=1, validators=[django.core.validators.MaxValueValidator(100), django.core.validators.MinValueValidator(1)]),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from preprocessing import *
from utils import *
def find_optimal_param(lda, x_train, y_train):
probs_train = lda.predict_proba(x_train)[:, 1]
y_train = [x for _,x in sorted(zip(probs_train,y_train))]
y_train = np.array(y_train)
probs_train.sort()
Se = []
Sp = []
for p in range(len(probs_train)):
tp = np.count_nonzero(y_train[p:] == 1)
fp = np.count_nonzero(y_train[p:] == 0)
tn = np.count_nonzero(y_train[:p] == 0)
fn = np.count_nonzero(y_train[:p] == 1)
Se.append(tp/(tp+fn))
Sp.append(tn/(tn+fp))
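    # choose the threshold that maximizes Se + Sp (i.e. Youden's J = Se + Sp - 1)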
mx = np.argmax(-(1-np.array(Sp) - np.array(Se)))
return probs_train[mx]
def predict(lda, x, y, m):
tp = 0
fp = 0
tn = 0
fn = 0
if len(x) != 0:
probs= lda.predict_proba(x)[:, 1]
for j in range(len(x)):
if probs[j] > m:
if y[j] == 1:
tp+=1
else:
fp+=1
else:
if y[j] == 1:
fn +=1
else:
tn +=1
return tp, fp, fn, tn
from methodutils import FdaUtils
class FDA_node(object):
def __init__(self):
"""Constructor"""
self.method = FdaUtils()
self.left = None
self.right = None
self.m = 0.5
def grow(self):
self.right = FDA_node()
self.left = FDA_node()
def find_optimal_param(self, x, y):
self.m = self.method.find_optimal_param(x, y)
if self.left != None and self.right != None:
left, right = self.divide_data(x)
self.left.find_optimal_param(x[left], y[left])
self.right.find_optimal_param(x[right], y[right])
def fit(self, x, y):
self.method.fit(x, y)
if self.left != None and self.right != None:
left, right = self.divide_data(x)
if (max(y[left]) == 0 or min(y[right]) == 1):
self.left = self.right = None
else:
                # fit each child on its own partition, matching divide_data() and predict()
                self.left.fit(x[left], y[left])
                self.right.fit(x[right], y[right])
def divide_data(self, x):
probs = self.method.predict_proba(x)[:, 1]
left = (probs <= self.m)
right = (probs > self.m)
return left, right
def predict(self, x):
if self.left == None and self.right == None:
pred = self.method.predict(x, self.m)
elif self.left != None and self.right != None:
left, right = self.divide_data(x)
l_pred = self.left.predict(x[left])
r_pred =self.right.predict(x[right])
pred = np.ones(x.shape[0])*2
pred[left] = l_pred
pred[right] = r_pred
return pred
if __name__ == "__main__":
np.seterr(all='raise')
from sklearn.metrics import confusion_matrix
from dataset import load_dataset, load_new_dataset_6002, diagnosis_to_binary, MOST_FREQ_DIAGS_NUMS_NEW
from fisher_discriminant import FisherDiscriminantAnalisys
num_components = 100
infile = open('C:\\Users\\donte_000\\PycharmProjects\\Basic_Methods\\data\\data_old_and_new_without_noise.pkl', 'rb')
(old, new) = pkl.load(infile)
infile.close()
Y = old["y"]
outfile = open('C:\\Users\\donte_000\\PycharmProjects\\Basic_Methods\\data\\6002_old_Dif.pkl', 'rb')
X = pkl.load(outfile)
outfile.close()
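    # PCA projection of the feature matrix; only the first num_components scores are used by the tree below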
pca = PCA(n_components=X.shape[0])
b = pca.fit_transform(X)
for d in reversed(MOST_FREQ_DIAGS_NUMS_NEW):
y_prediction =[]
y_labels = []
for train_index, test_index in cross_val(b.shape[0], 500):
tree = FDA_node()
tree.grow()
tree.fit(b[train_index, :num_components],Y[train_index,d])
tree.find_optimal_param(b[train_index, :num_components], Y[train_index,d])
y_prediction.append(tree.predict(b[test_index, :num_components]))
y_labels.append(Y[test_index, d])
y_prediction = np.array(y_prediction).flatten()
y_labels = np.array(y_labels).flatten()
tn, fp, fn, tp = confusion_matrix(y_labels, y_prediction).ravel()
test_se = tp / (tp + fn)
test_sp = tn / (tn + fp)
        print("Val. Se = %s, Val. Sp = %s" % (round(test_se, 4), round(test_sp, 4)))
|
normal
|
{
"blob_id": "784b51c05dc7b5e70016634e2664c9ec25b8a65a",
"index": 6506,
"step-1": "<mask token>\n\n\nclass FDA_node(object):\n <mask token>\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n def fit(self, x, y):\n self.method.fit(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if max(y[left]) == 0 or min(y[right]) == 1:\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n <mask token>\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n elif self.left != None and self.right != None:\n left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred = self.right.predict(x[right])\n pred = np.ones(x.shape[0]) * 2\n pred[left] = l_pred\n pred[right] = r_pred\n return pred\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FDA_node(object):\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.method = FdaUtils()\n self.left = None\n self.right = None\n self.m = 0.5\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n def fit(self, x, y):\n self.method.fit(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if max(y[left]) == 0 or min(y[right]) == 1:\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n <mask token>\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n elif self.left != None and self.right != None:\n left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred = self.right.predict(x[right])\n pred = np.ones(x.shape[0]) * 2\n pred[left] = l_pred\n pred[right] = r_pred\n return pred\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_optimal_param(lda, x_train, y_train):\n probs_train = lda.predict_proba(x_train)[:, 1]\n y_train = [x for _, x in sorted(zip(probs_train, y_train))]\n y_train = np.array(y_train)\n probs_train.sort()\n Se = []\n Sp = []\n for p in range(len(probs_train)):\n tp = np.count_nonzero(y_train[p:] == 1)\n fp = np.count_nonzero(y_train[p:] == 0)\n tn = np.count_nonzero(y_train[:p] == 0)\n fn = np.count_nonzero(y_train[:p] == 1)\n Se.append(tp / (tp + fn))\n Sp.append(tn / (tn + fp))\n mx = np.argmax(-(1 - np.array(Sp) - np.array(Se)))\n return probs_train[mx]\n\n\ndef predict(lda, x, y, m):\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n if len(x) != 0:\n probs = lda.predict_proba(x)[:, 1]\n for j in range(len(x)):\n if probs[j] > m:\n if y[j] == 1:\n tp += 1\n else:\n fp += 1\n elif y[j] == 1:\n fn += 1\n else:\n tn += 1\n return tp, fp, fn, tn\n\n\n<mask token>\n\n\nclass FDA_node(object):\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.method = FdaUtils()\n self.left = None\n self.right = None\n self.m = 0.5\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n def fit(self, x, y):\n self.method.fit(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if max(y[left]) == 0 or min(y[right]) == 1:\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n\n def divide_data(self, x):\n probs = self.method.predict_proba(x)[:, 1]\n left = probs <= self.m\n right = probs > self.m\n return left, right\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n elif self.left != None and self.right != None:\n left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred = self.right.predict(x[right])\n pred = np.ones(x.shape[0]) * 2\n pred[left] = l_pred\n pred[right] = r_pred\n return pred\n\n\n<mask token>\n",
"step-4": "import numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom preprocessing import *\nfrom utils import *\n\n\ndef find_optimal_param(lda, x_train, y_train):\n probs_train = lda.predict_proba(x_train)[:, 1]\n y_train = [x for _, x in sorted(zip(probs_train, y_train))]\n y_train = np.array(y_train)\n probs_train.sort()\n Se = []\n Sp = []\n for p in range(len(probs_train)):\n tp = np.count_nonzero(y_train[p:] == 1)\n fp = np.count_nonzero(y_train[p:] == 0)\n tn = np.count_nonzero(y_train[:p] == 0)\n fn = np.count_nonzero(y_train[:p] == 1)\n Se.append(tp / (tp + fn))\n Sp.append(tn / (tn + fp))\n mx = np.argmax(-(1 - np.array(Sp) - np.array(Se)))\n return probs_train[mx]\n\n\ndef predict(lda, x, y, m):\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n if len(x) != 0:\n probs = lda.predict_proba(x)[:, 1]\n for j in range(len(x)):\n if probs[j] > m:\n if y[j] == 1:\n tp += 1\n else:\n fp += 1\n elif y[j] == 1:\n fn += 1\n else:\n tn += 1\n return tp, fp, fn, tn\n\n\nfrom methodutils import FdaUtils\n\n\nclass FDA_node(object):\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.method = FdaUtils()\n self.left = None\n self.right = None\n self.m = 0.5\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n def fit(self, x, y):\n self.method.fit(x, y)\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if max(y[left]) == 0 or min(y[right]) == 1:\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n\n def divide_data(self, x):\n probs = self.method.predict_proba(x)[:, 1]\n left = probs <= self.m\n right = probs > self.m\n return left, right\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n elif self.left != None and self.right != None:\n left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred = self.right.predict(x[right])\n pred = np.ones(x.shape[0]) * 2\n pred[left] = l_pred\n pred[right] = r_pred\n return pred\n\n\nif __name__ == '__main__':\n np.seterr(all='raise')\n from sklearn.metrics import confusion_matrix\n from dataset import load_dataset, load_new_dataset_6002, diagnosis_to_binary, MOST_FREQ_DIAGS_NUMS_NEW\n from fisher_discriminant import FisherDiscriminantAnalisys\n num_components = 100\n infile = open(\n 'C:\\\\Users\\\\donte_000\\\\PycharmProjects\\\\Basic_Methods\\\\data\\\\data_old_and_new_without_noise.pkl'\n , 'rb')\n old, new = pkl.load(infile)\n infile.close()\n Y = old['y']\n outfile = open(\n 'C:\\\\Users\\\\donte_000\\\\PycharmProjects\\\\Basic_Methods\\\\data\\\\6002_old_Dif.pkl'\n , 'rb')\n X = pkl.load(outfile)\n outfile.close()\n pca = PCA(n_components=X.shape[0])\n b = pca.fit_transform(X)\n for d in reversed(MOST_FREQ_DIAGS_NUMS_NEW):\n y_prediction = []\n y_labels = []\n for train_index, test_index in cross_val(b.shape[0], 500):\n tree = FDA_node()\n tree.grow()\n tree.fit(b[train_index, :num_components], Y[train_index, d])\n tree.find_optimal_param(b[train_index, :num_components], Y[\n train_index, d])\n y_prediction.append(tree.predict(b[test_index, :num_components]))\n y_labels.append(Y[test_index, d])\n y_prediction = 
np.array(y_prediction).flatten()\n y_labels = np.array(y_labels).flatten()\n tn, fp, fn, tp = confusion_matrix(y_labels, y_prediction).ravel()\n test_se = tp / (tp + fn)\n test_sp = tn / (tn + fp)\n print('Val. Se = %s, Val. Sp = %s' % (round(test_sp, 4), round(\n test_se, 4)))\n",
"step-5": "import numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\nfrom preprocessing import *\nfrom utils import *\n\n\ndef find_optimal_param(lda, x_train, y_train):\n\n probs_train = lda.predict_proba(x_train)[:, 1]\n\n y_train = [x for _,x in sorted(zip(probs_train,y_train))]\n y_train = np.array(y_train)\n probs_train.sort()\n Se = []\n Sp = []\n for p in range(len(probs_train)):\n tp = np.count_nonzero(y_train[p:] == 1)\n fp = np.count_nonzero(y_train[p:] == 0)\n tn = np.count_nonzero(y_train[:p] == 0)\n fn = np.count_nonzero(y_train[:p] == 1)\n Se.append(tp/(tp+fn))\n Sp.append(tn/(tn+fp))\n\n mx = np.argmax(-(1-np.array(Sp) - np.array(Se)))\n\n return probs_train[mx]\n\ndef predict(lda, x, y, m):\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n if len(x) != 0:\n probs= lda.predict_proba(x)[:, 1]\n\n for j in range(len(x)):\n if probs[j] > m:\n if y[j] == 1:\n tp+=1\n else:\n fp+=1\n else:\n if y[j] == 1:\n fn +=1\n else:\n tn +=1\n\n return tp, fp, fn, tn\n\nfrom methodutils import FdaUtils\n\nclass FDA_node(object):\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self.method = FdaUtils()\n self.left = None\n self.right = None\n self.m = 0.5\n\n def grow(self):\n self.right = FDA_node()\n self.left = FDA_node()\n\n def find_optimal_param(self, x, y):\n self.m = self.method.find_optimal_param(x, y)\n\n\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n self.left.find_optimal_param(x[left], y[left])\n self.right.find_optimal_param(x[right], y[right])\n\n\n def fit(self, x, y):\n self.method.fit(x, y)\n\n if self.left != None and self.right != None:\n left, right = self.divide_data(x)\n if (max(y[left]) == 0 or min(y[right]) == 1):\n self.left = self.right = None\n else:\n self.right.fit(x[left], y[left])\n self.left.fit(x[right], y[right])\n\n\n def divide_data(self, x):\n probs = self.method.predict_proba(x)[:, 1]\n left = (probs <= self.m)\n right = (probs > self.m)\n return left, right\n\n\n def predict(self, x):\n if self.left == None and self.right == None:\n pred = self.method.predict(x, self.m)\n\n elif self.left != None and self.right != None:\n left, right = self.divide_data(x)\n l_pred = self.left.predict(x[left])\n r_pred =self.right.predict(x[right])\n pred = np.ones(x.shape[0])*2\n pred[left] = l_pred\n pred[right] = r_pred\n\n return pred\n\n\n\nif __name__ == \"__main__\":\n np.seterr(all='raise')\n from sklearn.metrics import confusion_matrix\n from dataset import load_dataset, load_new_dataset_6002, diagnosis_to_binary, MOST_FREQ_DIAGS_NUMS_NEW\n from fisher_discriminant import FisherDiscriminantAnalisys\n num_components = 100\n\n infile = open('C:\\\\Users\\\\donte_000\\\\PycharmProjects\\\\Basic_Methods\\\\data\\\\data_old_and_new_without_noise.pkl', 'rb')\n (old, new) = pkl.load(infile)\n infile.close()\n\n Y = old[\"y\"]\n outfile = open('C:\\\\Users\\\\donte_000\\\\PycharmProjects\\\\Basic_Methods\\\\data\\\\6002_old_Dif.pkl', 'rb')\n X = pkl.load(outfile)\n outfile.close()\n pca = PCA(n_components=X.shape[0])\n b = pca.fit_transform(X)\n\n\n\n for d in reversed(MOST_FREQ_DIAGS_NUMS_NEW):\n y_prediction =[]\n y_labels = []\n for train_index, test_index in cross_val(b.shape[0], 500):\n tree = FDA_node()\n tree.grow()\n tree.fit(b[train_index, :num_components],Y[train_index,d])\n tree.find_optimal_param(b[train_index, :num_components], Y[train_index,d])\n\n y_prediction.append(tree.predict(b[test_index, :num_components]))\n y_labels.append(Y[test_index, d])\n\n 
y_prediction = np.array(y_prediction).flatten()\n y_labels = np.array(y_labels).flatten()\n tn, fp, fn, tp = confusion_matrix(y_labels, y_prediction).ravel()\n\n test_se = tp / (tp + fn)\n test_sp = tn / (tn + fp)\n print(\"Val. Se = %s, Val. Sp = %s\" % (round(test_sp, 4), round(test_se, 4)))\n",
"step-ids": [
5,
6,
9,
11,
12
]
}
|
[
5,
6,
9,
11,
12
] |
from datetime import datetime
from django.core import mail
from entity_event import context_loader
from entity_emailer.models import Email
from entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, \
create_email_message, extract_email_subject_from_html_content
class EntityEmailerInterface(object):
"""
An api interface to do things within entity emailer
"""
@staticmethod
def send_unsent_scheduled_emails():
"""
Send out any scheduled emails that are unsent
"""
current_time = datetime.utcnow()
email_medium = get_medium()
to_send = Email.objects.filter(
scheduled__lte=current_time,
sent__isnull=True
).select_related(
'event'
).prefetch_related(
'recipients'
)
# Fetch the contexts of every event so that they may be rendered
context_loader.load_contexts_and_renderers([e.event for e in to_send], [email_medium])
emails = []
for email in to_send:
to_email_addresses = get_subscribed_email_addresses(email)
if to_email_addresses:
text_message, html_message = email.render(email_medium)
message = create_email_message(
to_emails=to_email_addresses,
from_email=email.from_address or get_from_email_address(),
subject=email.subject or extract_email_subject_from_html_content(html_message),
text=text_message,
html=html_message,
)
emails.append(message)
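        # send the whole batch over a single connection, then mark these emails as sent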
connection = mail.get_connection()
connection.send_messages(emails)
to_send.update(sent=current_time)
@staticmethod
def convert_events_to_emails():
"""
Converts unseen events to emails and marks them as seen.
"""
# Get the email medium
email_medium = get_medium()
# Get the default from email
default_from_email = get_from_email_address()
# Find any unseen events and create unsent email objects
for event, targets in email_medium.events_targets(seen=False, mark_seen=True):
# Check the event's context for a from_address, otherwise fallback to default
from_address = event.context.get('from_address') or default_from_email
# Create the emails
Email.objects.create_email(event=event, from_address=from_address, recipients=targets)
|
normal
|
{
"blob_id": "d1dc807ecc92d9108db2c9bd00ee9781e174a1aa",
"index": 558,
"step-1": "<mask token>\n\n\nclass EntityEmailerInterface(object):\n <mask token>\n <mask token>\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"step-2": "<mask token>\n\n\nclass EntityEmailerInterface(object):\n <mask token>\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(scheduled__lte=current_time,\n sent__isnull=True).select_related('event').prefetch_related(\n 'recipients')\n context_loader.load_contexts_and_renderers([e.event for e in\n to_send], [email_medium])\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address\n (), subject=email.subject or\n extract_email_subject_from_html_content(html_message),\n text=text_message, html=html_message)\n emails.append(message)\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"step-3": "<mask token>\n\n\nclass EntityEmailerInterface(object):\n \"\"\"\n An api interface to do things within entity emailer\n \"\"\"\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(scheduled__lte=current_time,\n sent__isnull=True).select_related('event').prefetch_related(\n 'recipients')\n context_loader.load_contexts_and_renderers([e.event for e in\n to_send], [email_medium])\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address\n (), subject=email.subject or\n extract_email_subject_from_html_content(html_message),\n text=text_message, html=html_message)\n emails.append(message)\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"step-4": "from datetime import datetime\nfrom django.core import mail\nfrom entity_event import context_loader\nfrom entity_emailer.models import Email\nfrom entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, create_email_message, extract_email_subject_from_html_content\n\n\nclass EntityEmailerInterface(object):\n \"\"\"\n An api interface to do things within entity emailer\n \"\"\"\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(scheduled__lte=current_time,\n sent__isnull=True).select_related('event').prefetch_related(\n 'recipients')\n context_loader.load_contexts_and_renderers([e.event for e in\n to_send], [email_medium])\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address\n (), subject=email.subject or\n extract_email_subject_from_html_content(html_message),\n text=text_message, html=html_message)\n emails.append(message)\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"step-5": "from datetime import datetime\n\nfrom django.core import mail\nfrom entity_event import context_loader\n\nfrom entity_emailer.models import Email\n\nfrom entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, \\\n create_email_message, extract_email_subject_from_html_content\n\n\nclass EntityEmailerInterface(object):\n \"\"\"\n An api interface to do things within entity emailer\n \"\"\"\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(\n scheduled__lte=current_time,\n sent__isnull=True\n ).select_related(\n 'event'\n ).prefetch_related(\n 'recipients'\n )\n\n # Fetch the contexts of every event so that they may be rendered\n context_loader.load_contexts_and_renderers([e.event for e in to_send], [email_medium])\n\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(\n to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address(),\n subject=email.subject or extract_email_subject_from_html_content(html_message),\n text=text_message,\n html=html_message,\n )\n emails.append(message)\n\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n\n # Get the email medium\n email_medium = get_medium()\n\n # Get the default from email\n default_from_email = get_from_email_address()\n\n # Find any unseen events and create unsent email objects\n for event, targets in email_medium.events_targets(seen=False, mark_seen=True):\n\n # Check the event's context for a from_address, otherwise fallback to default\n from_address = event.context.get('from_address') or default_from_email\n\n # Create the emails\n Email.objects.create_email(event=event, from_address=from_address, recipients=targets)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class Person:
def __init__(self, fname, lname):
self.fname = fname
self.lname = lname
def GetName(self):
return (self.fname + ' ' + self.lname)
|
normal
|
{
"blob_id": "ff358136bc96fa7f3eb41d019ddfd10fc4db8f0d",
"index": 5558,
"step-1": "<mask token>\n",
"step-2": "class Person:\n <mask token>\n <mask token>\n",
"step-3": "class Person:\n <mask token>\n\n def GetName(self):\n return self.fname + ' ' + self.lname\n",
"step-4": "class Person:\n\n def __init__(self, fname, lname):\n self.fname = fname\n self.lname = lname\n\n def GetName(self):\n return self.fname + ' ' + self.lname\n",
"step-5": "class Person:\n def __init__(self, fname, lname):\n self.fname = fname\n self.lname = lname\n\n def GetName(self):\n return (self.fname + ' ' + self.lname)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''a,b = input().split()
a, b = [int(a),int(b)]
List = set()
ArrayA = list(map(int, input().split()))
temp = 1
ArrayB = list(map(int, input().split()))
for i in range(max(ArrayA), min(ArrayB)+1):
for j in ArrayA:
if i%j is 1:
temp += 1
if temp is len(ArrayA):
List.add(i)
temp=1
newList = list(List)
temp = 1
newSet = set()
for i in newList:
for j in ArrayB:
if j%i==1:
temp+=1
if temp is len(ArrayB):
newSet.add(i)
temp=1
print(len(list(newSet)))
'''
'''nm = input().split( "-" )
a = (nm[1])
b = (nm[1])
print(nm)'''
'''x1, v1, x2, v2 = input().split()
x1, v1, x2, v2 = [int(x1),int(v1),int(x2),int(v2)]
if (x1<x2 and v1<v2) or (x2>x1 and v2>v1) or v1 is v2:
print("NO")
exit(1)
diff = 1
while True:
x1 += v1
x2 += v2
diff = x2 - x1
if diff < 1:
print("NO")
break
elif diff is 1:
print("YES")
break'''
#Graph Explaorartion
'''
import numpy as np
import matplotlib.pyplot as plt
N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 1.35 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, menMeans, width, color='royalblue', yerr=menStd)
womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, womenMeans, width, color='seagreen', yerr=womenStd)
# add some
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels( ('G1', 'G2', 'G3', 'G4', 'G5') )
ax.legend( (rects1[1], rects2[1]), ('Men', 'Women') )
plt.show()
'''
from math import gcd
# from functools import reduce
# for _ in range(int(input())):
# N = int(input())
# print(reduce(lambda x,y: x*y//gcd(x,y), range(1,N+1)))
import numpy as np
nk = input().split()
board = int(nk[0])
numberOfObs = int(nk[1])
roco = input().split()
obstacle = []
row = int(roco[0])
col = int(roco[1])
for _ in range(numberOfObs):
obs = input().split()
obstacle.append((int(obs[0]), int((obs[1]))))
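# The loops below appear to solve a "Queen's Attack"-style puzzle: starting from the
# queen's square (row, col) they sweep each of the 8 directions, counting squares until
# an obstacle or the board edge is hit. (Storing `obstacle` as a set would make the
# membership tests O(1); the list works, just more slowly, for large inputs.)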
#up
q = row
r = col
#down
s = row
t = col
#left
u = row
v = col
#right
w = row
x = col
#upper right
k = row
l = col
#lower left
i = row
j = col
#upperleft
m = row
n = col
#lower right
o = row
p = col
boxes = 0
while (1 <= q <= board) and (1 <= r <= board):
if (q, r) in obstacle:
break
else:
boxes += 1
q -= 1
while (1 <= s <= board) and (1 <= t <= board):
if (s, t) in obstacle:
break
else:
boxes += 1
s += 1
while (1 <= u <= board) and (1 <= v <= board):
if (u, v) in obstacle:
break
else:
boxes += 1
v -= 1
while (1 <= w <= board) and (1 <= x <= board):
if (w, x) in obstacle:
break
else:
boxes += 1
x += 1
while (1 <= o <= board) and (1 <= p <= board):
if (o, p) in obstacle:
break
else:
boxes += 1
o += 1
p += 1
while (1 <= m <= board) and (1 <= n <= board):
if (m, n) in obstacle:
break
else:
boxes += 1
m -= 1
n -= 1
while (1 <= k <= board) and (1 <= l <= board):
if (k, l) in obstacle:
break
else:
boxes += 1
k -= 1
l += 1
while (1 <= i <= board) and (1 <= j <= board):
    if (i, j) in obstacle:
break
else:
boxes += 1
i += 1
j -= 1
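# each of the 8 sweeps counted the queen's own starting square once, so subtract those 8 counts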
print(boxes - 8)
|
normal
|
{
"blob_id": "73d02615863826d77d65fbf0314dc71acb97ef28",
"index": 4035,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(numberOfObs):\n obs = input().split()\n obstacle.append((int(obs[0]), int(obs[1])))\n<mask token>\nwhile 1 <= q <= board and 1 <= r <= board:\n if (q, r) in obstacle:\n break\n else:\n boxes += 1\n q -= 1\nwhile 1 <= s <= board and 1 <= t <= board:\n if (s, t) in obstacle:\n break\n else:\n boxes += 1\n s += 1\nwhile 1 <= u <= board and 1 <= v <= board:\n if (u, v) in obstacle:\n break\n else:\n boxes += 1\n v -= 1\nwhile 1 <= w <= board and 1 <= x <= board:\n if (w, x) in obstacle:\n break\n else:\n boxes += 1\n x += 1\nwhile 1 <= o <= board and 1 <= p <= board:\n if (o, p) in obstacle:\n break\n else:\n boxes += 1\n o += 1\n p += 1\nwhile 1 <= m <= board and 1 <= n <= board:\n if (m, n) in obstacle:\n break\n else:\n boxes += 1\n m -= 1\n n -= 1\nwhile 1 <= k <= board and 1 <= l <= board:\n if (k, l) in obstacle:\n break\n else:\n boxes += 1\n k -= 1\n l += 1\nwhile 1 <= i <= board and 1 <= j <= board:\n if (i, j) in obstacle:\n break\n else:\n boxes += 1\n i += 1\n j -= 1\nprint(boxes - 8)\n",
"step-3": "<mask token>\nnk = input().split()\nboard = int(nk[0])\nnumberOfObs = int(nk[1])\nroco = input().split()\nobstacle = []\nrow = int(roco[0])\ncol = int(roco[1])\nfor _ in range(numberOfObs):\n obs = input().split()\n obstacle.append((int(obs[0]), int(obs[1])))\nq = row\nr = col\ns = row\nt = col\nu = row\nv = col\nw = row\nx = col\nk = row\nl = col\ni = row\nj = col\nm = row\nn = col\no = row\np = col\nboxes = 0\nwhile 1 <= q <= board and 1 <= r <= board:\n if (q, r) in obstacle:\n break\n else:\n boxes += 1\n q -= 1\nwhile 1 <= s <= board and 1 <= t <= board:\n if (s, t) in obstacle:\n break\n else:\n boxes += 1\n s += 1\nwhile 1 <= u <= board and 1 <= v <= board:\n if (u, v) in obstacle:\n break\n else:\n boxes += 1\n v -= 1\nwhile 1 <= w <= board and 1 <= x <= board:\n if (w, x) in obstacle:\n break\n else:\n boxes += 1\n x += 1\nwhile 1 <= o <= board and 1 <= p <= board:\n if (o, p) in obstacle:\n break\n else:\n boxes += 1\n o += 1\n p += 1\nwhile 1 <= m <= board and 1 <= n <= board:\n if (m, n) in obstacle:\n break\n else:\n boxes += 1\n m -= 1\n n -= 1\nwhile 1 <= k <= board and 1 <= l <= board:\n if (k, l) in obstacle:\n break\n else:\n boxes += 1\n k -= 1\n l += 1\nwhile 1 <= i <= board and 1 <= j <= board:\n if (i, j) in obstacle:\n break\n else:\n boxes += 1\n i += 1\n j -= 1\nprint(boxes - 8)\n",
"step-4": "<mask token>\nfrom math import gcd\nimport numpy as np\nnk = input().split()\nboard = int(nk[0])\nnumberOfObs = int(nk[1])\nroco = input().split()\nobstacle = []\nrow = int(roco[0])\ncol = int(roco[1])\nfor _ in range(numberOfObs):\n obs = input().split()\n obstacle.append((int(obs[0]), int(obs[1])))\nq = row\nr = col\ns = row\nt = col\nu = row\nv = col\nw = row\nx = col\nk = row\nl = col\ni = row\nj = col\nm = row\nn = col\no = row\np = col\nboxes = 0\nwhile 1 <= q <= board and 1 <= r <= board:\n if (q, r) in obstacle:\n break\n else:\n boxes += 1\n q -= 1\nwhile 1 <= s <= board and 1 <= t <= board:\n if (s, t) in obstacle:\n break\n else:\n boxes += 1\n s += 1\nwhile 1 <= u <= board and 1 <= v <= board:\n if (u, v) in obstacle:\n break\n else:\n boxes += 1\n v -= 1\nwhile 1 <= w <= board and 1 <= x <= board:\n if (w, x) in obstacle:\n break\n else:\n boxes += 1\n x += 1\nwhile 1 <= o <= board and 1 <= p <= board:\n if (o, p) in obstacle:\n break\n else:\n boxes += 1\n o += 1\n p += 1\nwhile 1 <= m <= board and 1 <= n <= board:\n if (m, n) in obstacle:\n break\n else:\n boxes += 1\n m -= 1\n n -= 1\nwhile 1 <= k <= board and 1 <= l <= board:\n if (k, l) in obstacle:\n break\n else:\n boxes += 1\n k -= 1\n l += 1\nwhile 1 <= i <= board and 1 <= j <= board:\n if (i, j) in obstacle:\n break\n else:\n boxes += 1\n i += 1\n j -= 1\nprint(boxes - 8)\n",
"step-5": "'''a,b = input().split()\na, b = [int(a),int(b)]\nList = set()\nArrayA = list(map(int, input().split()))\ntemp = 1\nArrayB = list(map(int, input().split()))\nfor i in range(max(ArrayA), min(ArrayB)+1):\n for j in ArrayA:\n if i%j is 1:\n temp += 1\n\n if temp is len(ArrayA):\n List.add(i)\n temp=1\nnewList = list(List)\ntemp = 1\nnewSet = set()\nfor i in newList:\n for j in ArrayB:\n if j%i==1:\n temp+=1\n if temp is len(ArrayB):\n newSet.add(i)\n temp=1\n\nprint(len(list(newSet)))\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''nm = input().split( \"-\" )\na = (nm[1])\nb = (nm[1])\nprint(nm)'''\n\n\n\n\n\n\n\n'''x1, v1, x2, v2 = input().split()\nx1, v1, x2, v2 = [int(x1),int(v1),int(x2),int(v2)]\nif (x1<x2 and v1<v2) or (x2>x1 and v2>v1) or v1 is v2:\n print(\"NO\")\n exit(1)\ndiff = 1\nwhile True:\n x1 += v1\n x2 += v2\n diff = x2 - x1\n if diff < 1:\n print(\"NO\")\n break\n elif diff is 1:\n print(\"YES\")\n break'''\n#Graph Explaorartion\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nN = 5\nmenMeans = (20, 35, 30, 35, 27)\nmenStd = (2, 3, 4, 1, 2)\n\nind = np.arange(N) # the x locations for the groups\nwidth = 1.35 # the width of the bars\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nrects1 = ax.bar(ind, menMeans, width, color='royalblue', yerr=menStd)\n\nwomenMeans = (25, 32, 34, 20, 25)\nwomenStd = (3, 5, 2, 3, 3)\nrects2 = ax.bar(ind+width, womenMeans, width, color='seagreen', yerr=womenStd)\n\n# add some\nax.set_ylabel('Scores')\nax.set_title('Scores by group and gender')\nax.set_xticks(ind + width / 2)\nax.set_xticklabels( ('G1', 'G2', 'G3', 'G4', 'G5') )\n\nax.legend( (rects1[1], rects2[1]), ('Men', 'Women') )\n\nplt.show()\n'''\nfrom math import gcd\n# from functools import reduce\n\n# for _ in range(int(input())):\n# N = int(input())\n# print(reduce(lambda x,y: x*y//gcd(x,y), range(1,N+1)))\nimport numpy as np\nnk = input().split()\nboard = int(nk[0])\nnumberOfObs = int(nk[1])\nroco = input().split()\nobstacle = []\nrow = int(roco[0])\ncol = int(roco[1])\nfor _ in range(numberOfObs):\n obs = input().split()\n obstacle.append((int(obs[0]), int((obs[1]))))\n#up\nq = row\nr = col\n#down\ns = row\nt = col\n#left\nu = row\nv = col\n#right\nw = row\nx = col\n#upper right\nk = row\nl = col\n#lower left\ni = row\nj = col\n#upperleft\nm = row\nn = col\n#lower right\no = row\np = col\nboxes = 0\nwhile (1 <= q <= board) and (1 <= r <= board):\n if (q, r) in obstacle:\n break\n else:\n boxes += 1\n q -= 1\nwhile (1 <= s <= board) and (1 <= t <= board):\n if (s, t) in obstacle:\n break\n else:\n\n\n boxes += 1\n s += 1\nwhile (1 <= u <= board) and (1 <= v <= board):\n if (u, v) in obstacle:\n break\n else:\n\n boxes += 1\n v -= 1\nwhile (1 <= w <= board) and (1 <= x <= board):\n if (w, x) in obstacle:\n break\n else:\n\n boxes += 1\n x += 1\nwhile (1 <= o <= board) and (1 <= p <= board):\n if (o, p) in obstacle:\n break\n else:\n\n boxes += 1\n o += 1\n p += 1\nwhile (1 <= m <= board) and (1 <= n <= board):\n if (m, n) in obstacle:\n break\n else:\n\n boxes += 1\n m -= 1\n n -= 1\nwhile (1 <= k <= board) and (1 <= l <= board):\n if (k, l) in obstacle:\n break\n else:\n\n boxes += 1\n k -= 1\n l += 1\nwhile (1 <= i <=board) and (1 <= j <= board):\n if (i,j) in obstacle:\n break\n else:\n boxes += 1\n i += 1\n j -= 1\nprint(boxes - 8)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from bs4 import BeautifulSoup
import requests,pymysql,random,time
import http.cookiejar
from multiprocessing import Pool,Lock
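# Pull the proxy list from a local MySQL table (proxies_info) and return a random entry
# formatted as the `proxies` dict that requests expects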
def get_proxies_ip():
db = pymysql.connect("localhost","root","xxx","xxx",charset='utf8')
cursor = db.cursor()
sql = "SELECT * FROM proxies_info;"
proxies_list = []
try:
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
proxy_ip = row[1]
proxy_port = str(row[2])
proxies_list.append(proxy_ip+':'+proxy_port)
except:
db.rollback()
db.close()
porxite = {
'http':'http://'+random.choice(proxies_list)
}
return porxite
def get_headers():
USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
return random.choice(USER_AGENTS)
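# Log in to www.gldjc.com (persisting cookies via an LWPCookieJar), read the category rows
# from the input CSV and fan them out to get_info through a multiprocessing Pool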
def handle():
global lock,session,GuangCai_Company_file
r_file = '1.csv'
w_file = 'w1.csv'
lock = Lock()
GuangCai_Company_file = open(w_file,'w')
headers= {'User-Agent': get_headers(),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
'Host':'www.gldjc.com',
'Origin':'http://www.gldjc.com',
'Referer':'http://www.gldjc.com/login?hostUrl=http://www.gldjc.com/membercenter/toRenewOrderPage'}
login_data = {
'userName':'13296385392',
'password':'qazwsxedc'
}
login_url = 'http://www.gldjc.com/dologin'
    # Create a session so requests from the same user share cookies/state; cookies are handled automatically until the session ends
    session = requests.Session()
    filename = 'cookie'
    # Use an LWPCookieJar instance, which stores cookies in the Set-Cookie3 file format
    # (the MozillaCookieJar class, by contrast, saves a Mozilla-style '.txt' cookie file)
    session.cookies = http.cookiejar.LWPCookieJar(filename)
    # If a local cookie file already exists, there is no need to post the login data again
try:
session.cookies.load(filename=filename, ignore_discard=True)
except:
        print('Cookie not loaded!')
content = session.post(login_url,data=login_data,headers=headers)
# print(content.content)
    # Save the cookie to the local file
session.cookies.save(ignore_discard=True, ignore_expires=True)
info_tuple_list = []
with open(r_file,'r') as GuangCai_file:
for info in GuangCai_file.readlines():
firs_cate = info.split('\t')[0].strip()
secd_cate = info.split('\t')[1].strip()
thir_cate = info.split('\t')[2].strip()
cate_url = info.split('\t')[4].strip()
info_tuple_list.append((firs_cate,secd_cate,thir_cate,cate_url))
pool = Pool(1)
pool.map(get_info,info_tuple_list)
pool.close()
pool.join()
GuangCai_Company_file.close()
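# Fetch one category page through a random proxy and, for each '#a_checkMore' link found,
# write the category fields plus the extracted spu id to the output file (under the lock)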
def get_info(info_tuple_list):
firs_cate = info_tuple_list[0].strip()
secd_cate = info_tuple_list[1].strip()
thir_cate = info_tuple_list[2].strip()
cate_url = info_tuple_list[3].strip()
time.sleep(2)
print(cate_url)
headers = {
'User-Agent': get_headers(),
}
try:
req = session.get(cate_url,allow_redirects=False,headers=headers,proxies=get_proxies_ip(),timeout=40)
req.encoding = 'utf-8'
# print(req.text)
soup = BeautifulSoup(req.text,'html.parser')
        # spu id of the product detail page
for next_page_id in soup.select('#a_checkMore'):
spu_id = next_page_id['onclick'].split("'")[1]
lock.acquire()
GuangCai_Company_file.write(firs_cate+'\t'+secd_cate+'\t'+thir_cate+'\t'+cate_url+'\t'+spu_id+'\n')
GuangCai_Company_file.flush()
lock.release()
print(spu_id)
except Exception as e:
lock.acquire()
with open('error.csv','a') as error_fil:
error_fil.write(cate_url+'\n')
lock.release()
print(e)
handle()
# with open('tehx.html','r') as tehx_file:
# soup = BeautifulSoup(tehx_file.read(),'html.parser')
# for next_page_id in soup.select('#a_checkMore'):
# print(next_page_id['onclick'].split("'")[1])
|
normal
|
{
"blob_id": "d49aa03cd6b8ba94d68a1bc1e064f77fded65000",
"index": 8870,
"step-1": "<mask token>\n\n\ndef get_headers():\n USER_AGENTS = [\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)'\n , 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)'\n ,\n 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)'\n ,\n 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0'\n ,\n 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',\n 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11'\n ,\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20'\n ,\n 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'\n ,\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; 
.NET4.0E)'\n ,\n 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'\n ,\n 'Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre'\n ,\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'\n ,\n 'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10'\n ]\n return random.choice(USER_AGENTS)\n\n\ndef handle():\n global lock, session, GuangCai_Company_file\n r_file = '1.csv'\n w_file = 'w1.csv'\n lock = Lock()\n GuangCai_Company_file = open(w_file, 'w')\n headers = {'User-Agent': get_headers(), 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',\n 'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, deflate',\n 'Host': 'www.gldjc.com', 'Origin': 'http://www.gldjc.com',\n 'Referer':\n 'http://www.gldjc.com/login?hostUrl=http://www.gldjc.com/membercenter/toRenewOrderPage'\n }\n login_data = {'userName': '13296385392', 'password': 'qazwsxedc'}\n login_url = 'http://www.gldjc.com/dologin'\n session = requests.Session()\n filename = 'cookie'\n session.cookies = http.cookiejar.LWPCookieJar(filename)\n try:\n session.cookies.load(filename=filename, ignore_discard=True)\n except:\n print('Cookie未加载!')\n content = session.post(login_url, data=login_data, headers=headers)\n session.cookies.save(ignore_discard=True, ignore_expires=True)\n info_tuple_list = []\n with open(r_file, 'r') as GuangCai_file:\n for info in GuangCai_file.readlines():\n firs_cate = info.split('\\t')[0].strip()\n secd_cate = info.split('\\t')[1].strip()\n thir_cate = info.split('\\t')[2].strip()\n cate_url = info.split('\\t')[4].strip()\n info_tuple_list.append((firs_cate, secd_cate, thir_cate, cate_url))\n pool = Pool(1)\n pool.map(get_info, info_tuple_list)\n pool.close()\n pool.join()\n GuangCai_Company_file.close()\n\n\ndef get_info(info_tuple_list):\n firs_cate = info_tuple_list[0].strip()\n secd_cate = info_tuple_list[1].strip()\n thir_cate = info_tuple_list[2].strip()\n cate_url = info_tuple_list[3].strip()\n time.sleep(2)\n print(cate_url)\n headers = {'User-Agent': get_headers()}\n try:\n req = session.get(cate_url, allow_redirects=False, headers=headers,\n proxies=get_proxies_ip(), timeout=40)\n req.encoding = 'utf-8'\n soup = BeautifulSoup(req.text, 'html.parser')\n for next_page_id in soup.select('#a_checkMore'):\n spu_id = next_page_id['onclick'].split(\"'\")[1]\n lock.acquire()\n GuangCai_Company_file.write(firs_cate + '\\t' + secd_cate + '\\t' +\n thir_cate + '\\t' + cate_url + '\\t' + spu_id + '\\n')\n GuangCai_Company_file.flush()\n lock.release()\n print(spu_id)\n except Exception as e:\n lock.acquire()\n with open('error.csv', 'a') as error_fil:\n error_fil.write(cate_url + '\\n')\n lock.release()\n print(e)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_proxies_ip():\n db = pymysql.connect('localhost', 'root', 'xxx', 'xxx', charset='utf8')\n cursor = db.cursor()\n sql = 'SELECT * FROM proxies_info;'\n proxies_list = []\n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n proxy_ip = row[1]\n proxy_port = str(row[2])\n proxies_list.append(proxy_ip + ':' + proxy_port)\n except:\n db.rollback()\n db.close()\n porxite = {'http': 'http://' + random.choice(proxies_list)}\n return porxite\n\n\ndef get_headers():\n USER_AGENTS = [\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)'\n , 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)'\n ,\n 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)'\n ,\n 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0'\n ,\n 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',\n 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11'\n ,\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20'\n ,\n 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'\n ,\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)'\n ,\n 'Mozilla/4.0 
(compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'\n ,\n 'Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre'\n ,\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'\n ,\n 'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10'\n ]\n return random.choice(USER_AGENTS)\n\n\ndef handle():\n global lock, session, GuangCai_Company_file\n r_file = '1.csv'\n w_file = 'w1.csv'\n lock = Lock()\n GuangCai_Company_file = open(w_file, 'w')\n headers = {'User-Agent': get_headers(), 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',\n 'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, deflate',\n 'Host': 'www.gldjc.com', 'Origin': 'http://www.gldjc.com',\n 'Referer':\n 'http://www.gldjc.com/login?hostUrl=http://www.gldjc.com/membercenter/toRenewOrderPage'\n }\n login_data = {'userName': '13296385392', 'password': 'qazwsxedc'}\n login_url = 'http://www.gldjc.com/dologin'\n session = requests.Session()\n filename = 'cookie'\n session.cookies = http.cookiejar.LWPCookieJar(filename)\n try:\n session.cookies.load(filename=filename, ignore_discard=True)\n except:\n print('Cookie未加载!')\n content = session.post(login_url, data=login_data, headers=headers)\n session.cookies.save(ignore_discard=True, ignore_expires=True)\n info_tuple_list = []\n with open(r_file, 'r') as GuangCai_file:\n for info in GuangCai_file.readlines():\n firs_cate = info.split('\\t')[0].strip()\n secd_cate = info.split('\\t')[1].strip()\n thir_cate = info.split('\\t')[2].strip()\n cate_url = info.split('\\t')[4].strip()\n info_tuple_list.append((firs_cate, secd_cate, thir_cate, cate_url))\n pool = Pool(1)\n pool.map(get_info, info_tuple_list)\n pool.close()\n pool.join()\n GuangCai_Company_file.close()\n\n\ndef get_info(info_tuple_list):\n firs_cate = info_tuple_list[0].strip()\n secd_cate = info_tuple_list[1].strip()\n thir_cate = info_tuple_list[2].strip()\n cate_url = info_tuple_list[3].strip()\n time.sleep(2)\n print(cate_url)\n headers = {'User-Agent': get_headers()}\n try:\n req = session.get(cate_url, allow_redirects=False, headers=headers,\n proxies=get_proxies_ip(), timeout=40)\n req.encoding = 'utf-8'\n soup = BeautifulSoup(req.text, 'html.parser')\n for next_page_id in soup.select('#a_checkMore'):\n spu_id = next_page_id['onclick'].split(\"'\")[1]\n lock.acquire()\n GuangCai_Company_file.write(firs_cate + '\\t' + secd_cate + '\\t' +\n thir_cate + '\\t' + cate_url + '\\t' + spu_id + '\\n')\n 
GuangCai_Company_file.flush()\n lock.release()\n print(spu_id)\n except Exception as e:\n lock.acquire()\n with open('error.csv', 'a') as error_fil:\n error_fil.write(cate_url + '\\n')\n lock.release()\n print(e)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_proxies_ip():\n db = pymysql.connect('localhost', 'root', 'xxx', 'xxx', charset='utf8')\n cursor = db.cursor()\n sql = 'SELECT * FROM proxies_info;'\n proxies_list = []\n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n proxy_ip = row[1]\n proxy_port = str(row[2])\n proxies_list.append(proxy_ip + ':' + proxy_port)\n except:\n db.rollback()\n db.close()\n porxite = {'http': 'http://' + random.choice(proxies_list)}\n return porxite\n\n\ndef get_headers():\n USER_AGENTS = [\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)'\n , 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)'\n ,\n 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)'\n ,\n 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0'\n ,\n 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',\n 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11'\n ,\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20'\n ,\n 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'\n ,\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)'\n ,\n 'Mozilla/4.0 
(compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'\n ,\n 'Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre'\n ,\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'\n ,\n 'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10'\n ]\n return random.choice(USER_AGENTS)\n\n\ndef handle():\n global lock, session, GuangCai_Company_file\n r_file = '1.csv'\n w_file = 'w1.csv'\n lock = Lock()\n GuangCai_Company_file = open(w_file, 'w')\n headers = {'User-Agent': get_headers(), 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',\n 'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, deflate',\n 'Host': 'www.gldjc.com', 'Origin': 'http://www.gldjc.com',\n 'Referer':\n 'http://www.gldjc.com/login?hostUrl=http://www.gldjc.com/membercenter/toRenewOrderPage'\n }\n login_data = {'userName': '13296385392', 'password': 'qazwsxedc'}\n login_url = 'http://www.gldjc.com/dologin'\n session = requests.Session()\n filename = 'cookie'\n session.cookies = http.cookiejar.LWPCookieJar(filename)\n try:\n session.cookies.load(filename=filename, ignore_discard=True)\n except:\n print('Cookie未加载!')\n content = session.post(login_url, data=login_data, headers=headers)\n session.cookies.save(ignore_discard=True, ignore_expires=True)\n info_tuple_list = []\n with open(r_file, 'r') as GuangCai_file:\n for info in GuangCai_file.readlines():\n firs_cate = info.split('\\t')[0].strip()\n secd_cate = info.split('\\t')[1].strip()\n thir_cate = info.split('\\t')[2].strip()\n cate_url = info.split('\\t')[4].strip()\n info_tuple_list.append((firs_cate, secd_cate, thir_cate, cate_url))\n pool = Pool(1)\n pool.map(get_info, info_tuple_list)\n pool.close()\n pool.join()\n GuangCai_Company_file.close()\n\n\ndef get_info(info_tuple_list):\n firs_cate = info_tuple_list[0].strip()\n secd_cate = info_tuple_list[1].strip()\n thir_cate = info_tuple_list[2].strip()\n cate_url = info_tuple_list[3].strip()\n time.sleep(2)\n print(cate_url)\n headers = {'User-Agent': get_headers()}\n try:\n req = session.get(cate_url, allow_redirects=False, headers=headers,\n proxies=get_proxies_ip(), timeout=40)\n req.encoding = 'utf-8'\n soup = BeautifulSoup(req.text, 'html.parser')\n for next_page_id in soup.select('#a_checkMore'):\n spu_id = next_page_id['onclick'].split(\"'\")[1]\n lock.acquire()\n GuangCai_Company_file.write(firs_cate + '\\t' + secd_cate + '\\t' +\n thir_cate + '\\t' + cate_url + '\\t' + spu_id + '\\n')\n 
GuangCai_Company_file.flush()\n lock.release()\n print(spu_id)\n except Exception as e:\n lock.acquire()\n with open('error.csv', 'a') as error_fil:\n error_fil.write(cate_url + '\\n')\n lock.release()\n print(e)\n\n\nhandle()\n",
"step-4": "from bs4 import BeautifulSoup\nfrom bs4 import BeautifulSoup\nimport requests, pymysql, random, time\nimport http.cookiejar\nfrom multiprocessing import Pool, Lock\n\n\ndef get_proxies_ip():\n db = pymysql.connect('localhost', 'root', 'xxx', 'xxx', charset='utf8')\n cursor = db.cursor()\n sql = 'SELECT * FROM proxies_info;'\n proxies_list = []\n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n proxy_ip = row[1]\n proxy_port = str(row[2])\n proxies_list.append(proxy_ip + ':' + proxy_port)\n except:\n db.rollback()\n db.close()\n porxite = {'http': 'http://' + random.choice(proxies_list)}\n return porxite\n\n\ndef get_headers():\n USER_AGENTS = [\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)'\n , 'Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)'\n ,\n 'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)'\n ,\n 'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1'\n ,\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0'\n ,\n 'Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5',\n 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11'\n ,\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20'\n ,\n 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER'\n ,\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; 
.NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)'\n ,\n 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1'\n ,\n 'Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre'\n ,\n 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0'\n ,\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11'\n ,\n 'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10'\n ]\n return random.choice(USER_AGENTS)\n\n\ndef handle():\n global lock, session, GuangCai_Company_file\n r_file = '1.csv'\n w_file = 'w1.csv'\n lock = Lock()\n GuangCai_Company_file = open(w_file, 'w')\n headers = {'User-Agent': get_headers(), 'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',\n 'Connection': 'keep-alive', 'Accept-Encoding': 'gzip, deflate',\n 'Host': 'www.gldjc.com', 'Origin': 'http://www.gldjc.com',\n 'Referer':\n 'http://www.gldjc.com/login?hostUrl=http://www.gldjc.com/membercenter/toRenewOrderPage'\n }\n login_data = {'userName': '13296385392', 'password': 'qazwsxedc'}\n login_url = 'http://www.gldjc.com/dologin'\n session = requests.Session()\n filename = 'cookie'\n session.cookies = http.cookiejar.LWPCookieJar(filename)\n try:\n session.cookies.load(filename=filename, ignore_discard=True)\n except:\n print('Cookie未加载!')\n content = session.post(login_url, data=login_data, headers=headers)\n session.cookies.save(ignore_discard=True, ignore_expires=True)\n info_tuple_list = []\n with open(r_file, 'r') as GuangCai_file:\n for info in GuangCai_file.readlines():\n firs_cate = info.split('\\t')[0].strip()\n secd_cate = info.split('\\t')[1].strip()\n thir_cate = info.split('\\t')[2].strip()\n cate_url = info.split('\\t')[4].strip()\n info_tuple_list.append((firs_cate, secd_cate, thir_cate, cate_url))\n pool = Pool(1)\n pool.map(get_info, info_tuple_list)\n pool.close()\n pool.join()\n GuangCai_Company_file.close()\n\n\ndef get_info(info_tuple_list):\n firs_cate = info_tuple_list[0].strip()\n secd_cate = info_tuple_list[1].strip()\n thir_cate = info_tuple_list[2].strip()\n cate_url = info_tuple_list[3].strip()\n time.sleep(2)\n print(cate_url)\n headers = {'User-Agent': get_headers()}\n try:\n req = session.get(cate_url, allow_redirects=False, headers=headers,\n proxies=get_proxies_ip(), timeout=40)\n req.encoding = 'utf-8'\n soup = BeautifulSoup(req.text, 'html.parser')\n for next_page_id in soup.select('#a_checkMore'):\n spu_id = next_page_id['onclick'].split(\"'\")[1]\n lock.acquire()\n 
GuangCai_Company_file.write(firs_cate + '\\t' + secd_cate + '\\t' +\n thir_cate + '\\t' + cate_url + '\\t' + spu_id + '\\n')\n GuangCai_Company_file.flush()\n lock.release()\n print(spu_id)\n except Exception as e:\n lock.acquire()\n with open('error.csv', 'a') as error_fil:\n error_fil.write(cate_url + '\\n')\n lock.release()\n print(e)\n\n\nhandle()\n",
"step-5": "from bs4 import BeautifulSoup\n\nfrom bs4 import BeautifulSoup\nimport requests,pymysql,random,time\nimport http.cookiejar\nfrom multiprocessing import Pool,Lock\n\ndef get_proxies_ip():\n db = pymysql.connect(\"localhost\",\"root\",\"xxx\",\"xxx\",charset='utf8')\n cursor = db.cursor()\n sql = \"SELECT * FROM proxies_info;\"\n proxies_list = []\n try:\n cursor.execute(sql)\n results = cursor.fetchall()\n for row in results:\n proxy_ip = row[1]\n proxy_port = str(row[2])\n proxies_list.append(proxy_ip+':'+proxy_port)\n except:\n db.rollback()\n db.close()\n porxite = {\n 'http':'http://'+random.choice(proxies_list)\n }\n return porxite\n\ndef get_headers():\n USER_AGENTS = [\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\",\n \"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)\",\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)\",\n \"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)\",\n \"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0\",\n \"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5\",\n \"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20\",\n \"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER\",\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)\",\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)\",\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 
2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)\",\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)\",\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)\",\n \"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1\",\n \"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5\",\n \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre\",\n \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11\",\n \"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10\"\n ]\n return random.choice(USER_AGENTS)\n\ndef handle():\n global lock,session,GuangCai_Company_file\n r_file = '1.csv'\n w_file = 'w1.csv'\n lock = Lock()\n GuangCai_Company_file = open(w_file,'w')\n headers= {'User-Agent': get_headers(),\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',\n 'Connection': 'keep-alive',\n 'Accept-Encoding': 'gzip, deflate',\n 'Host':'www.gldjc.com',\n 'Origin':'http://www.gldjc.com',\n 'Referer':'http://www.gldjc.com/login?hostUrl=http://www.gldjc.com/membercenter/toRenewOrderPage'}\n\n login_data = {\n 'userName':'13296385392',\n 'password':'qazwsxedc'\n }\n login_url = 'http://www.gldjc.com/dologin'\n\n # 建立一个会话,可以把同一用户的不同请求联系起来;直到会话结束都会自动处理cookies\n session = requests.Session()\n filename = 'cookie'\n # 建立LWPCookieJar实例,可以存Set-Cookie3类型的文件。\n # 而MozillaCookieJar类是存为'/.txt'格式的文件\n session.cookies = http.cookiejar.LWPCookieJar(filename)\n # 若本地有cookie则不用再post数据了\n try:\n session.cookies.load(filename=filename, ignore_discard=True)\n except:\n print('Cookie未加载!')\n\n content = session.post(login_url,data=login_data,headers=headers)\n # print(content.content)\n # 保存cookie到本地\n session.cookies.save(ignore_discard=True, ignore_expires=True)\n info_tuple_list = []\n with open(r_file,'r') as GuangCai_file:\n for info in GuangCai_file.readlines():\n firs_cate = info.split('\\t')[0].strip()\n secd_cate = info.split('\\t')[1].strip()\n thir_cate = info.split('\\t')[2].strip()\n cate_url = info.split('\\t')[4].strip()\n info_tuple_list.append((firs_cate,secd_cate,thir_cate,cate_url))\n\n pool = Pool(1)\n pool.map(get_info,info_tuple_list)\n pool.close()\n pool.join()\n GuangCai_Company_file.close()\n\ndef get_info(info_tuple_list):\n firs_cate = info_tuple_list[0].strip()\n secd_cate = info_tuple_list[1].strip()\n thir_cate = info_tuple_list[2].strip()\n cate_url = info_tuple_list[3].strip()\n time.sleep(2)\n print(cate_url)\n headers = {\n 'User-Agent': get_headers(),\n }\n try:\n req = session.get(cate_url,allow_redirects=False,headers=headers,proxies=get_proxies_ip(),timeout=40)\n req.encoding = 
'utf-8'\n # print(req.text)\n soup = BeautifulSoup(req.text,'html.parser')\n # 具体详情页的spu\n for next_page_id in soup.select('#a_checkMore'):\n spu_id = next_page_id['onclick'].split(\"'\")[1]\n lock.acquire()\n GuangCai_Company_file.write(firs_cate+'\\t'+secd_cate+'\\t'+thir_cate+'\\t'+cate_url+'\\t'+spu_id+'\\n')\n GuangCai_Company_file.flush()\n lock.release()\n print(spu_id)\n\n except Exception as e:\n lock.acquire()\n with open('error.csv','a') as error_fil:\n error_fil.write(cate_url+'\\n')\n lock.release()\n print(e)\n\n\nhandle()\n\n# with open('tehx.html','r') as tehx_file:\n# soup = BeautifulSoup(tehx_file.read(),'html.parser')\n# for next_page_id in soup.select('#a_checkMore'):\n# print(next_page_id['onclick'].split(\"'\")[1])\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# -*- coding: utf-8 -*-
import serial
import time
import argparse
def write_command(serial, comm, verbose = False, dt = None):
""" Encodes a command and sends it over the serial port """
if verbose and comm != "":
if dt is None:
print("{} \t\t-> {}".format(comm, serial.port))
else:
print("{} \t\t-> {} at {:2.3f} ms".format(comm, serial.port, dt))
serial.write(comm.encode())
def read_buffer(serial):
""" Reads the serial port bufer and decodes it """
resp = serial.read_all()
return resp.decode()
def read_and_print(serial):
""" Obtains serial responser and prints it if it's not empty """
resp = read_buffer(serial)
if resp != "":
print(resp)
def runcommands(cs, ts, ps, serials, verbose = False, profiling = False):
""" Runs a series of commands at certain specified times """
if len(ts) == len(cs):
i = 0
t0 = time.time()
dt = time.time() - t0 # elapsed time
while i < len(cs):
ser = serials[ps[i]]
comm = cs[i]
t = ts[i]
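            # busy-wait until the scheduled time t has passed (0.5 ms margin), printing any serial replies when verbose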
while (dt - t) < 0.0005:
dt = time.time() - t0
if verbose: read_and_print(ser)
if profiling:
write_command(ser, comm, verbose, dt)
else:
write_command(ser, comm, verbose)
i += 1
else:
print('Error: Lists are not equally long. ')
def load_csv(f):
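    """ Parses a CSV with rows of: time (s), command, port; a blank port reuses the previous row's port """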
delimiter = ','
ts = []
cs = []
ps = []
for l in f.readlines():
values = l.strip("\n").split(delimiter)
ts.append(float(values[0]))
cs.append(values[1])
if len(values) <= 3: # if there isn't a third field
values.append("") # add an empty one
p = values[2].strip(" ") # take all spaces out
if p == "":
ps.append(ps[-1]) # use previous one if it's empty
else:
ps.append(p)
return ts, cs, ps
# Create argument parser
parser = argparse.ArgumentParser(description='sends a series of commands over the serial port')
parser.add_argument('filename',
type=str, help='CSV file with columns for time, commands and ports')
parser.add_argument('-r', '--reps', required = False, default=1,
type=int, help='Number of command sequence repetitions (default: %(default)s)')
parser.add_argument('-bd', '--baudrate', required = False, default=38400,
type=int, help='Baudrate (default: %(default)s)')
parser.add_argument('-v', '--verbose', required = False,
action='store_true',
help='Print Commands as they are sent (default: %(default)s)')
parser.add_argument('-p', '--profiling', required = False,
action='store_true',
help='Show profiling information if verbose (default: %(default)s).')
# Get parameters
args = parser.parse_args()
#print(args.filename)
#print(args.reps)
#print(args.baudrate)
#print(args.verbose)
#print(args.profiling)
# Parameters
fname = args.filename
reps = args.reps
baudrate = args.baudrate
verbose = args.verbose
profiling = args.profiling
# test.csv -r 2 -b 38400 -v -p
#fname = 'test.csv'
#reps = 2
#baudrate = 38400
#verbose = True
#profiling = True
try:
f = open(fname, 'r')
ts, cs, ps = load_csv(f)
# Repeat all lists the specified number of times
ts_rep = [] # offset each rep's times
for r in range(reps):
for t in ts:
ts_rep.append(t + ts[-1]*r)
cs_rep = cs*reps
ps_reps = ps*reps
# Try to open the serial port connections and run the commands
try:
# Get list of unique portnames
ports = list(set(ps))
serials = {} # serial connections
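        # open each unique port once (8N1 framing, non-blocking writes) and reuse the connection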
for port in ports:
ser = serial.Serial(port = port,
baudrate=baudrate,
write_timeout=0,
bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE,
parity=serial.PARITY_NONE)
serials[port] = ser
runcommands(cs_rep, ts_rep, ps_reps, serials, verbose, profiling)
finally:
time.sleep(0.5)
for ser in serials.values():
ser.close()
finally:
f.close()
|
normal
|
{
"blob_id": "3ffcab4b36c6ca05f1e667c628ebb873ebdc0d25",
"index": 7866,
"step-1": "<mask token>\n\n\ndef write_command(serial, comm, verbose=False, dt=None):\n \"\"\" Encodes a command and sends it over the serial port \"\"\"\n if verbose and comm != '':\n if dt is None:\n print('{} \\t\\t-> {}'.format(comm, serial.port))\n else:\n print('{} \\t\\t-> {} at {:2.3f} ms'.format(comm, serial.port, dt))\n serial.write(comm.encode())\n\n\ndef read_buffer(serial):\n \"\"\" Reads the serial port bufer and decodes it \"\"\"\n resp = serial.read_all()\n return resp.decode()\n\n\ndef read_and_print(serial):\n \"\"\" Obtains serial responser and prints it if it's not empty \"\"\"\n resp = read_buffer(serial)\n if resp != '':\n print(resp)\n\n\ndef runcommands(cs, ts, ps, serials, verbose=False, profiling=False):\n \"\"\" Runs a series of commands at certain specified times \"\"\"\n if len(ts) == len(cs):\n i = 0\n t0 = time.time()\n dt = time.time() - t0\n while i < len(cs):\n ser = serials[ps[i]]\n comm = cs[i]\n t = ts[i]\n while dt - t < 0.0005:\n dt = time.time() - t0\n if verbose:\n read_and_print(ser)\n if profiling:\n write_command(ser, comm, verbose, dt)\n else:\n write_command(ser, comm, verbose)\n i += 1\n else:\n print('Error: Lists are not equally long. ')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef write_command(serial, comm, verbose=False, dt=None):\n \"\"\" Encodes a command and sends it over the serial port \"\"\"\n if verbose and comm != '':\n if dt is None:\n print('{} \\t\\t-> {}'.format(comm, serial.port))\n else:\n print('{} \\t\\t-> {} at {:2.3f} ms'.format(comm, serial.port, dt))\n serial.write(comm.encode())\n\n\ndef read_buffer(serial):\n \"\"\" Reads the serial port bufer and decodes it \"\"\"\n resp = serial.read_all()\n return resp.decode()\n\n\ndef read_and_print(serial):\n \"\"\" Obtains serial responser and prints it if it's not empty \"\"\"\n resp = read_buffer(serial)\n if resp != '':\n print(resp)\n\n\ndef runcommands(cs, ts, ps, serials, verbose=False, profiling=False):\n \"\"\" Runs a series of commands at certain specified times \"\"\"\n if len(ts) == len(cs):\n i = 0\n t0 = time.time()\n dt = time.time() - t0\n while i < len(cs):\n ser = serials[ps[i]]\n comm = cs[i]\n t = ts[i]\n while dt - t < 0.0005:\n dt = time.time() - t0\n if verbose:\n read_and_print(ser)\n if profiling:\n write_command(ser, comm, verbose, dt)\n else:\n write_command(ser, comm, verbose)\n i += 1\n else:\n print('Error: Lists are not equally long. ')\n\n\ndef load_csv(f):\n delimiter = ','\n ts = []\n cs = []\n ps = []\n for l in f.readlines():\n values = l.strip('\\n').split(delimiter)\n ts.append(float(values[0]))\n cs.append(values[1])\n if len(values) <= 3:\n values.append('')\n p = values[2].strip(' ')\n if p == '':\n ps.append(ps[-1])\n else:\n ps.append(p)\n return ts, cs, ps\n\n\n<mask token>\nparser.add_argument('filename', type=str, help=\n 'CSV file with columns for time, commands and ports')\nparser.add_argument('-r', '--reps', required=False, default=1, type=int,\n help='Number of command sequence repetitions (default: %(default)s)')\nparser.add_argument('-bd', '--baudrate', required=False, default=38400,\n type=int, help='Baudrate (default: %(default)s)')\nparser.add_argument('-v', '--verbose', required=False, action='store_true',\n help='Print Commands as they are sent (default: %(default)s)')\nparser.add_argument('-p', '--profiling', required=False, action=\n 'store_true', help=\n 'Show profiling information if verbose (default: %(default)s).')\n<mask token>\ntry:\n f = open(fname, 'r')\n ts, cs, ps = load_csv(f)\n ts_rep = []\n for r in range(reps):\n for t in ts:\n ts_rep.append(t + ts[-1] * r)\n cs_rep = cs * reps\n ps_reps = ps * reps\n try:\n ports = list(set(ps))\n serials = {}\n for port in ports:\n ser = serial.Serial(port=port, baudrate=baudrate, write_timeout\n =0, bytesize=serial.EIGHTBITS, stopbits=serial.STOPBITS_ONE,\n parity=serial.PARITY_NONE)\n serials[port] = ser\n runcommands(cs_rep, ts_rep, ps_reps, serials, verbose, profiling)\n finally:\n time.sleep(0.5)\n for ser in serials.values():\n ser.close()\nfinally:\n f.close()\n",
"step-3": "<mask token>\n\n\ndef write_command(serial, comm, verbose=False, dt=None):\n \"\"\" Encodes a command and sends it over the serial port \"\"\"\n if verbose and comm != '':\n if dt is None:\n print('{} \\t\\t-> {}'.format(comm, serial.port))\n else:\n print('{} \\t\\t-> {} at {:2.3f} ms'.format(comm, serial.port, dt))\n serial.write(comm.encode())\n\n\ndef read_buffer(serial):\n \"\"\" Reads the serial port bufer and decodes it \"\"\"\n resp = serial.read_all()\n return resp.decode()\n\n\ndef read_and_print(serial):\n \"\"\" Obtains serial responser and prints it if it's not empty \"\"\"\n resp = read_buffer(serial)\n if resp != '':\n print(resp)\n\n\ndef runcommands(cs, ts, ps, serials, verbose=False, profiling=False):\n \"\"\" Runs a series of commands at certain specified times \"\"\"\n if len(ts) == len(cs):\n i = 0\n t0 = time.time()\n dt = time.time() - t0\n while i < len(cs):\n ser = serials[ps[i]]\n comm = cs[i]\n t = ts[i]\n while dt - t < 0.0005:\n dt = time.time() - t0\n if verbose:\n read_and_print(ser)\n if profiling:\n write_command(ser, comm, verbose, dt)\n else:\n write_command(ser, comm, verbose)\n i += 1\n else:\n print('Error: Lists are not equally long. ')\n\n\ndef load_csv(f):\n delimiter = ','\n ts = []\n cs = []\n ps = []\n for l in f.readlines():\n values = l.strip('\\n').split(delimiter)\n ts.append(float(values[0]))\n cs.append(values[1])\n if len(values) <= 3:\n values.append('')\n p = values[2].strip(' ')\n if p == '':\n ps.append(ps[-1])\n else:\n ps.append(p)\n return ts, cs, ps\n\n\nparser = argparse.ArgumentParser(description=\n 'sends a series of commands over the serial port')\nparser.add_argument('filename', type=str, help=\n 'CSV file with columns for time, commands and ports')\nparser.add_argument('-r', '--reps', required=False, default=1, type=int,\n help='Number of command sequence repetitions (default: %(default)s)')\nparser.add_argument('-bd', '--baudrate', required=False, default=38400,\n type=int, help='Baudrate (default: %(default)s)')\nparser.add_argument('-v', '--verbose', required=False, action='store_true',\n help='Print Commands as they are sent (default: %(default)s)')\nparser.add_argument('-p', '--profiling', required=False, action=\n 'store_true', help=\n 'Show profiling information if verbose (default: %(default)s).')\nargs = parser.parse_args()\nfname = args.filename\nreps = args.reps\nbaudrate = args.baudrate\nverbose = args.verbose\nprofiling = args.profiling\ntry:\n f = open(fname, 'r')\n ts, cs, ps = load_csv(f)\n ts_rep = []\n for r in range(reps):\n for t in ts:\n ts_rep.append(t + ts[-1] * r)\n cs_rep = cs * reps\n ps_reps = ps * reps\n try:\n ports = list(set(ps))\n serials = {}\n for port in ports:\n ser = serial.Serial(port=port, baudrate=baudrate, write_timeout\n =0, bytesize=serial.EIGHTBITS, stopbits=serial.STOPBITS_ONE,\n parity=serial.PARITY_NONE)\n serials[port] = ser\n runcommands(cs_rep, ts_rep, ps_reps, serials, verbose, profiling)\n finally:\n time.sleep(0.5)\n for ser in serials.values():\n ser.close()\nfinally:\n f.close()\n",
"step-4": "import serial\nimport time\nimport argparse\n\n\ndef write_command(serial, comm, verbose=False, dt=None):\n \"\"\" Encodes a command and sends it over the serial port \"\"\"\n if verbose and comm != '':\n if dt is None:\n print('{} \\t\\t-> {}'.format(comm, serial.port))\n else:\n print('{} \\t\\t-> {} at {:2.3f} ms'.format(comm, serial.port, dt))\n serial.write(comm.encode())\n\n\ndef read_buffer(serial):\n \"\"\" Reads the serial port bufer and decodes it \"\"\"\n resp = serial.read_all()\n return resp.decode()\n\n\ndef read_and_print(serial):\n \"\"\" Obtains serial responser and prints it if it's not empty \"\"\"\n resp = read_buffer(serial)\n if resp != '':\n print(resp)\n\n\ndef runcommands(cs, ts, ps, serials, verbose=False, profiling=False):\n \"\"\" Runs a series of commands at certain specified times \"\"\"\n if len(ts) == len(cs):\n i = 0\n t0 = time.time()\n dt = time.time() - t0\n while i < len(cs):\n ser = serials[ps[i]]\n comm = cs[i]\n t = ts[i]\n while dt - t < 0.0005:\n dt = time.time() - t0\n if verbose:\n read_and_print(ser)\n if profiling:\n write_command(ser, comm, verbose, dt)\n else:\n write_command(ser, comm, verbose)\n i += 1\n else:\n print('Error: Lists are not equally long. ')\n\n\ndef load_csv(f):\n delimiter = ','\n ts = []\n cs = []\n ps = []\n for l in f.readlines():\n values = l.strip('\\n').split(delimiter)\n ts.append(float(values[0]))\n cs.append(values[1])\n if len(values) <= 3:\n values.append('')\n p = values[2].strip(' ')\n if p == '':\n ps.append(ps[-1])\n else:\n ps.append(p)\n return ts, cs, ps\n\n\nparser = argparse.ArgumentParser(description=\n 'sends a series of commands over the serial port')\nparser.add_argument('filename', type=str, help=\n 'CSV file with columns for time, commands and ports')\nparser.add_argument('-r', '--reps', required=False, default=1, type=int,\n help='Number of command sequence repetitions (default: %(default)s)')\nparser.add_argument('-bd', '--baudrate', required=False, default=38400,\n type=int, help='Baudrate (default: %(default)s)')\nparser.add_argument('-v', '--verbose', required=False, action='store_true',\n help='Print Commands as they are sent (default: %(default)s)')\nparser.add_argument('-p', '--profiling', required=False, action=\n 'store_true', help=\n 'Show profiling information if verbose (default: %(default)s).')\nargs = parser.parse_args()\nfname = args.filename\nreps = args.reps\nbaudrate = args.baudrate\nverbose = args.verbose\nprofiling = args.profiling\ntry:\n f = open(fname, 'r')\n ts, cs, ps = load_csv(f)\n ts_rep = []\n for r in range(reps):\n for t in ts:\n ts_rep.append(t + ts[-1] * r)\n cs_rep = cs * reps\n ps_reps = ps * reps\n try:\n ports = list(set(ps))\n serials = {}\n for port in ports:\n ser = serial.Serial(port=port, baudrate=baudrate, write_timeout\n =0, bytesize=serial.EIGHTBITS, stopbits=serial.STOPBITS_ONE,\n parity=serial.PARITY_NONE)\n serials[port] = ser\n runcommands(cs_rep, ts_rep, ps_reps, serials, verbose, profiling)\n finally:\n time.sleep(0.5)\n for ser in serials.values():\n ser.close()\nfinally:\n f.close()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\r\nimport serial\r\nimport time\r\nimport argparse\r\n\r\n \r\ndef write_command(serial, comm, verbose = False, dt = None):\r\n \"\"\" Encodes a command and sends it over the serial port \"\"\"\r\n if verbose and comm != \"\":\r\n if dt is None:\r\n print(\"{} \\t\\t-> {}\".format(comm, serial.port))\r\n else:\r\n print(\"{} \\t\\t-> {} at {:2.3f} ms\".format(comm, serial.port, dt))\r\n serial.write(comm.encode())\r\n \r\ndef read_buffer(serial):\r\n \"\"\" Reads the serial port bufer and decodes it \"\"\"\r\n resp = serial.read_all()\r\n return resp.decode()\r\n\r\ndef read_and_print(serial):\r\n \"\"\" Obtains serial responser and prints it if it's not empty \"\"\"\r\n resp = read_buffer(serial)\r\n if resp != \"\":\r\n print(resp)\r\n \r\n\r\ndef runcommands(cs, ts, ps, serials, verbose = False, profiling = False):\r\n \"\"\" Runs a series of commands at certain specified times \"\"\"\r\n if len(ts) == len(cs):\r\n i = 0\r\n t0 = time.time()\r\n dt = time.time() - t0 # elapsed time\r\n while i < len(cs):\r\n ser = serials[ps[i]]\r\n comm = cs[i]\r\n t = ts[i]\r\n while (dt - t) < 0.0005:\r\n dt = time.time() - t0\r\n if verbose: read_and_print(ser)\r\n if profiling:\r\n write_command(ser, comm, verbose, dt)\r\n else:\r\n write_command(ser, comm, verbose)\r\n i += 1\r\n else:\r\n print('Error: Lists are not equally long. ')\r\n\r\n\r\ndef load_csv(f):\r\n delimiter = ','\r\n ts = []\r\n cs = []\r\n ps = []\r\n for l in f.readlines():\r\n values = l.strip(\"\\n\").split(delimiter)\r\n ts.append(float(values[0]))\r\n cs.append(values[1])\r\n if len(values) <= 3: # if there isn't a third field\r\n values.append(\"\") # add an empty one\r\n p = values[2].strip(\" \") # take all spaces out\r\n if p == \"\": \r\n ps.append(ps[-1]) # use previous one if it's empty\r\n else:\r\n ps.append(p)\r\n return ts, cs, ps\r\n\r\n# Create argument parser\r\n \r\nparser = argparse.ArgumentParser(description='sends a series of commands over the serial port')\r\nparser.add_argument('filename',\r\n type=str, help='CSV file with columns for time, commands and ports')\r\nparser.add_argument('-r', '--reps', required = False, default=1,\r\n type=int, help='Number of command sequence repetitions (default: %(default)s)')\r\nparser.add_argument('-bd', '--baudrate', required = False, default=38400,\r\n type=int, help='Baudrate (default: %(default)s)')\r\nparser.add_argument('-v', '--verbose', required = False,\r\n action='store_true',\r\n help='Print Commands as they are sent (default: %(default)s)')\r\nparser.add_argument('-p', '--profiling', required = False,\r\n action='store_true',\r\n help='Show profiling information if verbose (default: %(default)s).')\r\n \r\n# Get parameters\r\nargs = parser.parse_args()\r\n#print(args.filename)\r\n#print(args.reps)\r\n#print(args.baudrate)\r\n#print(args.verbose)\r\n#print(args.profiling)\r\n\r\n# Parameters\r\nfname = args.filename\r\nreps = args.reps\r\nbaudrate = args.baudrate\r\nverbose = args.verbose\r\nprofiling = args.profiling\r\n\r\n# test.csv -r 2 -b 38400 -v -p\r\n#fname = 'test.csv'\r\n#reps = 2\r\n#baudrate = 38400\r\n#verbose = True\r\n#profiling = True\r\ntry: \r\n f = open(fname, 'r')\r\n ts, cs, ps = load_csv(f)\r\n\r\n # Repeat all lists the specified number of times\r\n ts_rep = [] # offset each rep's times\r\n for r in range(reps):\r\n for t in ts:\r\n ts_rep.append(t + ts[-1]*r)\r\n cs_rep = cs*reps\r\n ps_reps = ps*reps\r\n \r\n # Try to open the serial port connections and run the commands\r\n\r\n try:\r\n # Get 
list of unique portnames\r\n ports = list(set(ps))\r\n serials = {} # serial connections\r\n for port in ports:\r\n ser = serial.Serial(port = port, \r\n baudrate=baudrate,\r\n write_timeout=0,\r\n bytesize=serial.EIGHTBITS,\r\n stopbits=serial.STOPBITS_ONE,\r\n parity=serial.PARITY_NONE)\r\n serials[port] = ser\r\n runcommands(cs_rep, ts_rep, ps_reps, serials, verbose, profiling)\r\n finally:\r\n time.sleep(0.5)\r\n for ser in serials.values():\r\n ser.close()\r\nfinally:\r\n f.close()",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import pyodbc
from configuration.config import Configuration
from models.entities import Entities
from models.columns import Columns
from models.relationships import Relationship
from models.synonyms import Synonyms
from spacy.lemmatizer import Lemmatizer
from spacy.lookups import Lookups
class DBModel(object):
def __init__(self):
self.entities = []
self.columns = []
self.relationships = []
self.synonyms_col = []
self.synonyms_tab = []
self.entity_graph = []
self.loaded_entities = []
self.config = Configuration()
self.conn = pyodbc.connect(self.config.get_sql_connection_string())
lookups = Lookups()
self.lemmatizer = Lemmatizer(lookups)
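        # the lemmatizer (built with empty lookups) is used below to index lemma forms of loaded entity values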
self.load_db_model()
def load_db_model(self):
# loading the database from sql server
cursor = self.conn.cursor()
cursor.execute(self.config.get_tables_sql_query())
for row in cursor:
self.entities.append(Entities(row.table_name, self.config.get_default_column(row.table_name)))
cursor.execute(self.config.get_columns_sql_query())
current_entity = None
current_entity_name = ""
for row in cursor:
if current_entity_name != row.table_name:
current_entity_name = row.table_name
current_entity = next(en for en in self.entities if en.name == current_entity_name)
col_type = row.type_name
if col_type == "varchar" or col_type == "nvarchar":
col_type = "string"
current_entity.columns.append(Columns(row.column_name, col_type))
current_entity = None
current_entity_name = ""
cursor.execute(self.config.get_FK_sql_query())
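        # foreign keys: record each relationship and build an undirected table adjacency list in entity_graph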
for row in cursor:
self.relationships.append(Relationship(row.parent_table, row.refrenced_table, row.parent_table_col, row.referenced_table_col))
if len([en for en in self.entity_graph if en[0] == row.parent_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[0] == row.parent_table)
current_entity[1].append(row.refrenced_table)
else:
self.entity_graph.append((row.parent_table, [row.refrenced_table]))
if len([en for en in self.entity_graph if en[0] == row.refrenced_table]) > 0:
current_entity = next(en for en in self.entity_graph if en[0] == row.refrenced_table)
current_entity[1].append(row.parent_table)
else:
self.entity_graph.append((row.refrenced_table, [row.parent_table]))
current_entity = None
current_entity_name = ""
cursor.execute(self.config.get_PK_sql_query())
for row in cursor:
if len([en for en in self.entity_graph if en[0] == row.table_name]) == 1:
current_entity = next(en for en in self.entities if en.name == row.table_name)
current_entity.primaryKey = row.primary_key
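        # preload distinct values (plus their lemma forms) for each configured entity column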
for entity_to_load in self.config.get_entitites_to_load():
entity_load_query = "select distinct " + entity_to_load["column"] + " from " + entity_to_load["entity"]
cursor.execute(entity_load_query)
entity_data = (entity_to_load["entity"], [])
for row in cursor:
entity_data[1].append(row[0])
# add lemma strings
lemmas = self.lemmatizer(str(row[0]), u'NOUN')
for lemma in lemmas:
entity_data[1].append(str(lemma))
self.loaded_entities.append(entity_data)
# load synonyms from declarative file
        # table synonyms
for table_synonym in self.config.get_synonyms()["table"]:
orginal_val = table_synonym["original"]
synonyms_vals = table_synonym["synonyms"]
for synonyms_val in synonyms_vals:
self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))
        # column synonyms
for column_synonym in self.config.get_synonyms()["column"]:
orginal_val = column_synonym["original"]
synonyms_vals = column_synonym["synonyms"]
for synonyms_val in synonyms_vals:
self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))
# make a single array
self.columns = [column for entity in self.entities for column in entity.columns]
# might have to write a custom matcher TODO
# build the matcher based upon the original value and domain synonyms defined
def get_matcher(self, matcher, nlp):
for entity in self.entities:
matcher.add(entity.name.upper() + "_TABLE", None, nlp(entity.name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + "_COLUMN", None, nlp(column.name.lower()))
# add table synonyms to matcher
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + "_TABLE", None, nlp(synonym.synonym.lower()))
# add column synonyms to matcher
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + "_COLUMN", None, nlp(synonym.synonym.lower()))
return matcher
def get_custom_matcher(self, matcher, nlp):
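        # same as get_matcher, but for a matcher whose add() takes patterns directly (no on-match callback argument)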
for entity in self.entities:
matcher.add(entity.name.upper() + "_TABLE", nlp(entity.name.lower()))
for column in entity.columns:
matcher.add(column.name.upper() + "_COLUMN", nlp(column.name.lower()))
# add table synonyms to matcher
for synonym in self.synonyms_tab:
for entity in self.entities:
if synonym.column.lower() == entity.name.lower():
matcher.add(entity.name.upper() + "_TABLE", nlp(synonym.synonym.lower()))
# add column synonyms to matcher
for synonym in self.synonyms_col:
for column in self.columns:
if synonym.column.lower() == column.name.lower():
matcher.add(column.name.upper() + "_COLUMN", nlp(synonym.synonym.lower()))
return matcher
|
normal
|
{
"blob_id": "76ebab93441676f9f00b2c2d63435e72c2d5d1ba",
"index": 9936,
"step-1": "<mask token>\n\n\nclass DBModel(object):\n <mask token>\n <mask token>\n\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.\n name.lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n column.name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(\n synonym.synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n synonym.synonym.lower()))\n return matcher\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DBModel(object):\n <mask token>\n\n def load_db_model(self):\n cursor = self.conn.cursor()\n cursor.execute(self.config.get_tables_sql_query())\n for row in cursor:\n self.entities.append(Entities(row.table_name, self.config.\n get_default_column(row.table_name)))\n cursor.execute(self.config.get_columns_sql_query())\n current_entity = None\n current_entity_name = ''\n for row in cursor:\n if current_entity_name != row.table_name:\n current_entity_name = row.table_name\n current_entity = next(en for en in self.entities if en.name ==\n current_entity_name)\n col_type = row.type_name\n if col_type == 'varchar' or col_type == 'nvarchar':\n col_type = 'string'\n current_entity.columns.append(Columns(row.column_name, col_type))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_FK_sql_query())\n for row in cursor:\n self.relationships.append(Relationship(row.parent_table, row.\n refrenced_table, row.parent_table_col, row.\n referenced_table_col))\n if len([en for en in self.entity_graph if en[0] == row.\n parent_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.parent_table)\n current_entity[1].append(row.refrenced_table)\n else:\n self.entity_graph.append((row.parent_table, [row.\n refrenced_table]))\n if len([en for en in self.entity_graph if en[0] == row.\n refrenced_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.refrenced_table)\n current_entity[1].append(row.parent_table)\n else:\n self.entity_graph.append((row.refrenced_table, [row.\n parent_table]))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_PK_sql_query())\n for row in cursor:\n if len([en for en in self.entity_graph if en[0] == row.table_name]\n ) == 1:\n current_entity = next(en for en in self.entities if en.name ==\n row.table_name)\n current_entity.primaryKey = row.primary_key\n for entity_to_load in self.config.get_entitites_to_load():\n entity_load_query = 'select distinct ' + entity_to_load['column'\n ] + ' from ' + entity_to_load['entity']\n cursor.execute(entity_load_query)\n entity_data = entity_to_load['entity'], []\n for row in cursor:\n entity_data[1].append(row[0])\n lemmas = self.lemmatizer(str(row[0]), u'NOUN')\n for lemma in lemmas:\n entity_data[1].append(str(lemma))\n self.loaded_entities.append(entity_data)\n for table_synonym in self.config.get_synonyms()['table']:\n orginal_val = table_synonym['original']\n synonyms_vals = table_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))\n for column_synonym in self.config.get_synonyms()['column']:\n orginal_val = column_synonym['original']\n synonyms_vals = column_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))\n self.columns = [column for entity in self.entities for column in\n entity.columns]\n\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.\n name.lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n column.name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(\n synonym.synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() 
== column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n synonym.synonym.lower()))\n return matcher\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DBModel(object):\n <mask token>\n\n def load_db_model(self):\n cursor = self.conn.cursor()\n cursor.execute(self.config.get_tables_sql_query())\n for row in cursor:\n self.entities.append(Entities(row.table_name, self.config.\n get_default_column(row.table_name)))\n cursor.execute(self.config.get_columns_sql_query())\n current_entity = None\n current_entity_name = ''\n for row in cursor:\n if current_entity_name != row.table_name:\n current_entity_name = row.table_name\n current_entity = next(en for en in self.entities if en.name ==\n current_entity_name)\n col_type = row.type_name\n if col_type == 'varchar' or col_type == 'nvarchar':\n col_type = 'string'\n current_entity.columns.append(Columns(row.column_name, col_type))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_FK_sql_query())\n for row in cursor:\n self.relationships.append(Relationship(row.parent_table, row.\n refrenced_table, row.parent_table_col, row.\n referenced_table_col))\n if len([en for en in self.entity_graph if en[0] == row.\n parent_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.parent_table)\n current_entity[1].append(row.refrenced_table)\n else:\n self.entity_graph.append((row.parent_table, [row.\n refrenced_table]))\n if len([en for en in self.entity_graph if en[0] == row.\n refrenced_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.refrenced_table)\n current_entity[1].append(row.parent_table)\n else:\n self.entity_graph.append((row.refrenced_table, [row.\n parent_table]))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_PK_sql_query())\n for row in cursor:\n if len([en for en in self.entity_graph if en[0] == row.table_name]\n ) == 1:\n current_entity = next(en for en in self.entities if en.name ==\n row.table_name)\n current_entity.primaryKey = row.primary_key\n for entity_to_load in self.config.get_entitites_to_load():\n entity_load_query = 'select distinct ' + entity_to_load['column'\n ] + ' from ' + entity_to_load['entity']\n cursor.execute(entity_load_query)\n entity_data = entity_to_load['entity'], []\n for row in cursor:\n entity_data[1].append(row[0])\n lemmas = self.lemmatizer(str(row[0]), u'NOUN')\n for lemma in lemmas:\n entity_data[1].append(str(lemma))\n self.loaded_entities.append(entity_data)\n for table_synonym in self.config.get_synonyms()['table']:\n orginal_val = table_synonym['original']\n synonyms_vals = table_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))\n for column_synonym in self.config.get_synonyms()['column']:\n orginal_val = column_synonym['original']\n synonyms_vals = column_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))\n self.columns = [column for entity in self.entities for column in\n entity.columns]\n\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.\n name.lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n column.name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(\n synonym.synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() 
== column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n synonym.synonym.lower()))\n return matcher\n\n def get_custom_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', nlp(entity.name.\n lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', nlp(column.\n name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', nlp(synonym\n .synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', nlp(\n synonym.synonym.lower()))\n return matcher\n",
"step-4": "import pyodbc\nfrom configuration.config import Configuration\nfrom models.entities import Entities\nfrom models.columns import Columns\nfrom models.relationships import Relationship\nfrom models.synonyms import Synonyms\nfrom spacy.lemmatizer import Lemmatizer\nfrom spacy.lookups import Lookups\n\n\nclass DBModel(object):\n\n def __init__(self):\n self.entities = []\n self.columns = []\n self.relationships = []\n self.synonyms_col = []\n self.synonyms_tab = []\n self.entity_graph = []\n self.loaded_entities = []\n self.config = Configuration()\n self.conn = pyodbc.connect(self.config.get_sql_connection_string())\n lookups = Lookups()\n self.lemmatizer = Lemmatizer(lookups)\n self.load_db_model()\n\n def load_db_model(self):\n cursor = self.conn.cursor()\n cursor.execute(self.config.get_tables_sql_query())\n for row in cursor:\n self.entities.append(Entities(row.table_name, self.config.\n get_default_column(row.table_name)))\n cursor.execute(self.config.get_columns_sql_query())\n current_entity = None\n current_entity_name = ''\n for row in cursor:\n if current_entity_name != row.table_name:\n current_entity_name = row.table_name\n current_entity = next(en for en in self.entities if en.name ==\n current_entity_name)\n col_type = row.type_name\n if col_type == 'varchar' or col_type == 'nvarchar':\n col_type = 'string'\n current_entity.columns.append(Columns(row.column_name, col_type))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_FK_sql_query())\n for row in cursor:\n self.relationships.append(Relationship(row.parent_table, row.\n refrenced_table, row.parent_table_col, row.\n referenced_table_col))\n if len([en for en in self.entity_graph if en[0] == row.\n parent_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.parent_table)\n current_entity[1].append(row.refrenced_table)\n else:\n self.entity_graph.append((row.parent_table, [row.\n refrenced_table]))\n if len([en for en in self.entity_graph if en[0] == row.\n refrenced_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[\n 0] == row.refrenced_table)\n current_entity[1].append(row.parent_table)\n else:\n self.entity_graph.append((row.refrenced_table, [row.\n parent_table]))\n current_entity = None\n current_entity_name = ''\n cursor.execute(self.config.get_PK_sql_query())\n for row in cursor:\n if len([en for en in self.entity_graph if en[0] == row.table_name]\n ) == 1:\n current_entity = next(en for en in self.entities if en.name ==\n row.table_name)\n current_entity.primaryKey = row.primary_key\n for entity_to_load in self.config.get_entitites_to_load():\n entity_load_query = 'select distinct ' + entity_to_load['column'\n ] + ' from ' + entity_to_load['entity']\n cursor.execute(entity_load_query)\n entity_data = entity_to_load['entity'], []\n for row in cursor:\n entity_data[1].append(row[0])\n lemmas = self.lemmatizer(str(row[0]), u'NOUN')\n for lemma in lemmas:\n entity_data[1].append(str(lemma))\n self.loaded_entities.append(entity_data)\n for table_synonym in self.config.get_synonyms()['table']:\n orginal_val = table_synonym['original']\n synonyms_vals = table_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))\n for column_synonym in self.config.get_synonyms()['column']:\n orginal_val = column_synonym['original']\n synonyms_vals = column_synonym['synonyms']\n for synonyms_val in synonyms_vals:\n self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))\n 
self.columns = [column for entity in self.entities for column in\n entity.columns]\n\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(entity.\n name.lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n column.name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', None, nlp(\n synonym.synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', None, nlp(\n synonym.synonym.lower()))\n return matcher\n\n def get_custom_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + '_TABLE', nlp(entity.name.\n lower()))\n for column in entity.columns:\n matcher.add(column.name.upper() + '_COLUMN', nlp(column.\n name.lower()))\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + '_TABLE', nlp(synonym\n .synonym.lower()))\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + '_COLUMN', nlp(\n synonym.synonym.lower()))\n return matcher\n",
"step-5": "import pyodbc\n\nfrom configuration.config import Configuration\nfrom models.entities import Entities\nfrom models.columns import Columns\nfrom models.relationships import Relationship\nfrom models.synonyms import Synonyms\n\nfrom spacy.lemmatizer import Lemmatizer\nfrom spacy.lookups import Lookups\n\n\nclass DBModel(object):\n def __init__(self):\n self.entities = []\n self.columns = []\n self.relationships = []\n self.synonyms_col = []\n self.synonyms_tab = []\n self.entity_graph = []\n self.loaded_entities = []\n self.config = Configuration()\n self.conn = pyodbc.connect(self.config.get_sql_connection_string())\n lookups = Lookups()\n self.lemmatizer = Lemmatizer(lookups)\n self.load_db_model()\n\n def load_db_model(self):\n # loading the database from sql server\n cursor = self.conn.cursor()\n cursor.execute(self.config.get_tables_sql_query())\n for row in cursor:\n self.entities.append(Entities(row.table_name, self.config.get_default_column(row.table_name)))\n\n cursor.execute(self.config.get_columns_sql_query())\n current_entity = None\n current_entity_name = \"\"\n for row in cursor:\n if current_entity_name != row.table_name:\n current_entity_name = row.table_name\n current_entity = next(en for en in self.entities if en.name == current_entity_name)\n\n col_type = row.type_name\n if col_type == \"varchar\" or col_type == \"nvarchar\":\n col_type = \"string\"\n current_entity.columns.append(Columns(row.column_name, col_type))\n\n current_entity = None\n current_entity_name = \"\"\n cursor.execute(self.config.get_FK_sql_query())\n for row in cursor:\n self.relationships.append(Relationship(row.parent_table, row.refrenced_table, row.parent_table_col, row.referenced_table_col))\n if len([en for en in self.entity_graph if en[0] == row.parent_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[0] == row.parent_table)\n current_entity[1].append(row.refrenced_table)\n else:\n self.entity_graph.append((row.parent_table, [row.refrenced_table]))\n \n if len([en for en in self.entity_graph if en[0] == row.refrenced_table]) > 0:\n current_entity = next(en for en in self.entity_graph if en[0] == row.refrenced_table)\n current_entity[1].append(row.parent_table)\n else:\n self.entity_graph.append((row.refrenced_table, [row.parent_table]))\n\n current_entity = None\n current_entity_name = \"\"\n cursor.execute(self.config.get_PK_sql_query())\n for row in cursor:\n if len([en for en in self.entity_graph if en[0] == row.table_name]) == 1:\n current_entity = next(en for en in self.entities if en.name == row.table_name)\n current_entity.primaryKey = row.primary_key\n\n for entity_to_load in self.config.get_entitites_to_load():\n entity_load_query = \"select distinct \" + entity_to_load[\"column\"] + \" from \" + entity_to_load[\"entity\"]\n cursor.execute(entity_load_query)\n entity_data = (entity_to_load[\"entity\"], [])\n for row in cursor:\n entity_data[1].append(row[0])\n # add lemma strings\n lemmas = self.lemmatizer(str(row[0]), u'NOUN')\n for lemma in lemmas:\n entity_data[1].append(str(lemma))\n self.loaded_entities.append(entity_data)\n \n # load synonyms from declarative file\n # table sysnonyms\n for table_synonym in self.config.get_synonyms()[\"table\"]:\n orginal_val = table_synonym[\"original\"]\n synonyms_vals = table_synonym[\"synonyms\"]\n for synonyms_val in synonyms_vals:\n self.synonyms_tab.append(Synonyms(orginal_val, synonyms_val))\n\n # column sysnonyms\n for column_synonym in self.config.get_synonyms()[\"column\"]:\n orginal_val = 
column_synonym[\"original\"]\n synonyms_vals = column_synonym[\"synonyms\"]\n for synonyms_val in synonyms_vals:\n self.synonyms_col.append(Synonyms(orginal_val, synonyms_val))\n\n\n # make a single array\n self.columns = [column for entity in self.entities for column in entity.columns]\n \n\n # might have to write a custom matcher TODO\n # build the matcher based upon the original value and domain synonyms defined\n def get_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + \"_TABLE\", None, nlp(entity.name.lower())) \n for column in entity.columns:\n matcher.add(column.name.upper() + \"_COLUMN\", None, nlp(column.name.lower()))\n\n # add table synonyms to matcher\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + \"_TABLE\", None, nlp(synonym.synonym.lower())) \n\n # add column synonyms to matcher\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + \"_COLUMN\", None, nlp(synonym.synonym.lower())) \n \n\n return matcher\n\n def get_custom_matcher(self, matcher, nlp):\n for entity in self.entities:\n matcher.add(entity.name.upper() + \"_TABLE\", nlp(entity.name.lower())) \n for column in entity.columns:\n matcher.add(column.name.upper() + \"_COLUMN\", nlp(column.name.lower()))\n\n # add table synonyms to matcher\n for synonym in self.synonyms_tab:\n for entity in self.entities:\n if synonym.column.lower() == entity.name.lower():\n matcher.add(entity.name.upper() + \"_TABLE\", nlp(synonym.synonym.lower())) \n\n # add column synonyms to matcher\n for synonym in self.synonyms_col:\n for column in self.columns:\n if synonym.column.lower() == column.name.lower():\n matcher.add(column.name.upper() + \"_COLUMN\", nlp(synonym.synonym.lower())) \n \n\n return matcher\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.