code (string, 13-1.2M chars) | order_type (string, 1 class) | original_example (dict) | step_ids (list, 1-5 items)
---|---|---|---|
# -*- coding: utf-8 -*-
"""
-----------------------------------------
    IDEA Name     : PyCharm
    Project Name  : HelloWorld
-----------------------------------------
    File Name     : task_worker
    Description   :
    Author        : Edwin
    Date          : 2018/1/4 23:38
-----------------------------------------
    Changer       : Edwin
    Date          : 2018/1/4 23:38
    Description   :
-----------------------------------------
"""
__author__ = 'Edwin'

import queue
import time
from multiprocessing.managers import BaseManager


# Create a matching QueueManager:
class QueueManager(BaseManager):
    pass


def start_request():
    # This QueueManager only fetches Queues over the network,
    # so registration provides only the names:
    QueueManager.register('get_task_queue')
    QueueManager.register('get_result_queue')

    # Connect to the server, i.e. the machine running task_master.py:
    server_add = '127.0.0.1'
    print('Connect to server %s...' % server_add)
    # The port and authkey must exactly match the settings in task_master.py:
    manager = QueueManager(address=(server_add, 5000), authkey=b'abc')
    # Connect over the network:
    manager.connect()
    # Get the Queue proxy objects:
    task = manager.get_task_queue()
    result = manager.get_result_queue()

    # Fetch tasks from the task queue and write answers to the result queue:
    for i in range(10):
        try:
            n = task.get(timeout=1)
            print('run task %d * %d...' % (n, n))
            r = '%d * %d = %d' % (n, n, n * n)
            time.sleep(5)
            result.put(r)
        except queue.Empty:
            print('task queue is empty!')
    # Results processed
    print('worker exit..')


if __name__ == '__main__':
    start_request()
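
# Hedged sketch (not part of the original record): the matching task_master.py
# is not shown. A compatible master, following the standard
# multiprocessing.managers pattern, might look like the function below. Only
# the address port (5000) and authkey (b'abc') are taken from the worker
# above; the queue setup and random task values are assumptions. It is
# defined here but never invoked.
def run_master_sketch():
    import random
    task_queue = queue.Queue()
    result_queue = queue.Queue()
    # Expose both queues on the network. Note: on Windows these lambdas
    # cannot be pickled into the manager's server process, so module-level
    # functions would be needed instead.
    QueueManager.register('get_task_queue', callable=lambda: task_queue)
    QueueManager.register('get_result_queue', callable=lambda: result_queue)
    manager = QueueManager(address=('', 5000), authkey=b'abc')
    manager.start()
    task = manager.get_task_queue()
    result = manager.get_result_queue()
    for _ in range(10):
        task.put(random.randint(0, 10000))  # enqueue work items
    for _ in range(10):
        print('Result: %s' % result.get(timeout=60))
    manager.shutdown()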
|
normal
|
{
"blob_id": "be1bfa3e366d715d32613284924cf79abde06d41",
"index": 582,
"step-1": "<mask token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"step-3": "<mask token>\n__author__ = 'Edwin'\n<mask token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"step-4": "<mask token>\n__author__ = 'Edwin'\nimport queue\nimport time\nfrom multiprocessing.managers import BaseManager\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n-----------------------------------------\n IDEA Name : PyCharm \n Project Name : HelloWorld\n-----------------------------------------\n File Name : task_worker\n Description :\n Author : Edwin\n Date : 2018/1/4 23:38\n-----------------------------------------\n Changer : Edwin\n Date : 2018/1/4 23:38\n Description : \n-----------------------------------------\n\"\"\"\n__author__ = 'Edwin'\n\nimport queue\nimport time\nfrom multiprocessing.managers import BaseManager\n\n\n# 创建类似的QueueManager:\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n # 由于这个QueueManager只从网络上获取Queue,所以注册时只提供名字:\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n\n # 连接到服务器,也就是运行task_master.py的机器:\n server_add = '127.0.0.1'\n\n print('Connect to server %s...' % server_add)\n # 端口和验证码注意保持与task_master.py设置的完全一致:\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n # 从网络连接:\n manager.connect()\n # 获取Queue的对象:\n\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n\n # 从task队列取任务,并把结果写入result队列:\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n # 处理结果\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
dic = {'name': 'Eric', 'age': '25'}  # a dictionary literal
print(dic['name'])
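# Brief aside (not in the original snippet): indexing a missing key raises
# KeyError, while dict.get returns a default instead, which is often the
# safer lookup.
print(dic.get('age'))           # '25'
print(dic.get('email'))         # None: key absent, no exception
print(dic.get('email', 'n/a'))  # 'n/a': explicit default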
|
normal
|
{
"blob_id": "09c3a10230e7d0b3b893ccf236c39fc2dc12b2c6",
"index": 1097,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(dic['name'])\n",
"step-3": "dic = {'name': 'Eric', 'age': '25'}\nprint(dic['name'])\n",
"step-4": "dic = {'name': 'Eric', 'age': '25'} # 딕셔너리 형태\n\n\nprint(dic['name'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import matplotlib.pyplot as plt
file_list = ["Quantification_comet_fdr.csv",
"Quantification_crux_fdr.csv",
"Quantification_msfg_fdr.csv",
"Quantification_msfg_percolator.csv"]
file_titles = ["Comet",
"Crux",
"MSGFPlus",
"MSGFPlus + Percolator"]
protein_list = []
peptides_list = []
for file_name in file_list:
proteins = 0 # n of proteins
peptides = 0
for line_index, line in enumerate(open(file_name, 'r')):
if line_index > 3: # Proteins are listed after row 4
proteins += 1
peptides += int(line.split('\t')[3]) # n_peptides is in column 4
protein_list.append(proteins)
peptides_list.append(peptides)
print(f"{file_name} is done")
plt.bar(file_titles,
protein_list,
color=['black', 'red', 'green', 'blue', 'cyan'],
edgecolor='blue')
plt.title("Comparing proteins found")
plt.ylabel("Number of proteins matched")
plt.tight_layout() # Fixes cut off labels
plt.savefig("search_engines_proteins.png")
plt.bar(file_titles,
peptides_list,
color=['black', 'red', 'green', 'blue', 'cyan'],
edgecolor='blue')
plt.title("Comparing amount of peptides matched")
plt.ylabel("Total amount of peptides matched")
plt.tight_layout() # Fixes cut off labels
plt.savefig("search_engines_peptides.png")
|
normal
|
{
"blob_id": "e08159a51b611ce6d0ca354a4fe6759d00af2cb7",
"index": 660,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor file_name in file_list:\n proteins = 0\n peptides = 0\n for line_index, line in enumerate(open(file_name, 'r')):\n if line_index > 3:\n proteins += 1\n peptides += int(line.split('\\t')[3])\n protein_list.append(proteins)\n peptides_list.append(peptides)\n print(f'{file_name} is done')\nplt.bar(file_titles, protein_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing proteins found')\nplt.ylabel('Number of proteins matched')\nplt.tight_layout()\nplt.savefig('search_engines_proteins.png')\nplt.bar(file_titles, peptides_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing amount of peptides matched')\nplt.ylabel('Total amount of peptides matched')\nplt.tight_layout()\nplt.savefig('search_engines_peptides.png')\n",
"step-3": "<mask token>\nfile_list = ['Quantification_comet_fdr.csv', 'Quantification_crux_fdr.csv',\n 'Quantification_msfg_fdr.csv', 'Quantification_msfg_percolator.csv']\nfile_titles = ['Comet', 'Crux', 'MSGFPlus', 'MSGFPlus + Percolator']\nprotein_list = []\npeptides_list = []\nfor file_name in file_list:\n proteins = 0\n peptides = 0\n for line_index, line in enumerate(open(file_name, 'r')):\n if line_index > 3:\n proteins += 1\n peptides += int(line.split('\\t')[3])\n protein_list.append(proteins)\n peptides_list.append(peptides)\n print(f'{file_name} is done')\nplt.bar(file_titles, protein_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing proteins found')\nplt.ylabel('Number of proteins matched')\nplt.tight_layout()\nplt.savefig('search_engines_proteins.png')\nplt.bar(file_titles, peptides_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing amount of peptides matched')\nplt.ylabel('Total amount of peptides matched')\nplt.tight_layout()\nplt.savefig('search_engines_peptides.png')\n",
"step-4": "import matplotlib.pyplot as plt\nfile_list = ['Quantification_comet_fdr.csv', 'Quantification_crux_fdr.csv',\n 'Quantification_msfg_fdr.csv', 'Quantification_msfg_percolator.csv']\nfile_titles = ['Comet', 'Crux', 'MSGFPlus', 'MSGFPlus + Percolator']\nprotein_list = []\npeptides_list = []\nfor file_name in file_list:\n proteins = 0\n peptides = 0\n for line_index, line in enumerate(open(file_name, 'r')):\n if line_index > 3:\n proteins += 1\n peptides += int(line.split('\\t')[3])\n protein_list.append(proteins)\n peptides_list.append(peptides)\n print(f'{file_name} is done')\nplt.bar(file_titles, protein_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing proteins found')\nplt.ylabel('Number of proteins matched')\nplt.tight_layout()\nplt.savefig('search_engines_proteins.png')\nplt.bar(file_titles, peptides_list, color=['black', 'red', 'green', 'blue',\n 'cyan'], edgecolor='blue')\nplt.title('Comparing amount of peptides matched')\nplt.ylabel('Total amount of peptides matched')\nplt.tight_layout()\nplt.savefig('search_engines_peptides.png')\n",
"step-5": "import matplotlib.pyplot as plt\n\nfile_list = [\"Quantification_comet_fdr.csv\",\n \"Quantification_crux_fdr.csv\",\n \"Quantification_msfg_fdr.csv\",\n \"Quantification_msfg_percolator.csv\"]\nfile_titles = [\"Comet\",\n \"Crux\",\n \"MSGFPlus\",\n \"MSGFPlus + Percolator\"]\n\nprotein_list = []\npeptides_list = []\n\nfor file_name in file_list:\n proteins = 0 # n of proteins\n peptides = 0\n for line_index, line in enumerate(open(file_name, 'r')):\n if line_index > 3: # Proteins are listed after row 4\n proteins += 1\n peptides += int(line.split('\\t')[3]) # n_peptides is in column 4\n protein_list.append(proteins)\n peptides_list.append(peptides)\n print(f\"{file_name} is done\")\n\nplt.bar(file_titles,\n protein_list,\n color=['black', 'red', 'green', 'blue', 'cyan'],\n edgecolor='blue')\nplt.title(\"Comparing proteins found\")\nplt.ylabel(\"Number of proteins matched\")\nplt.tight_layout() # Fixes cut off labels\nplt.savefig(\"search_engines_proteins.png\")\n\nplt.bar(file_titles,\n peptides_list,\n color=['black', 'red', 'green', 'blue', 'cyan'],\n edgecolor='blue')\nplt.title(\"Comparing amount of peptides matched\")\nplt.ylabel(\"Total amount of peptides matched\")\nplt.tight_layout() # Fixes cut off labels\nplt.savefig(\"search_engines_peptides.png\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from types import *  # NB: shadows the stdlib 'types' module; this presumably
                     # refers to a local types.py defining Symbol, OpenParen,
                     # CloseParen, Number, Undefined and Eof used below.

class Tokenizer:
    def __init__(self, buf):
        self.buf = buf
        self.index = 0

    def token(self):
        return self.buf[self.index]

    def move(self, value):
        self.index += value

    def skip_whitespaces(self):
        while self.index < len(self.buf) and self.token().isspace():
            self.move(1)

    def next(self):
        self.skip_whitespaces()

        if self.index < len(self.buf):
            if self.token() == '+':
                self.move(1)
                return Symbol('+')
            elif self.token() == '-':
                self.move(1)
                return Symbol('-')
            elif self.token() == '*':
                self.move(1)
                return Symbol('*')
            elif self.token() == '/':
                self.move(1)
                return Symbol('/')
            elif self.token() == '(':
                self.move(1)
                return OpenParen()
            elif self.token() == ')':
                self.move(1)
                return CloseParen()
            else:
                if self.token().isnumeric():
                    # accumulate consecutive digits into one number
                    number = int(self.token())
                    self.move(1)
                    while self.index < len(self.buf) and self.token().isnumeric():
                        number = number * 10 + int(self.token())
                        self.move(1)
                    return Number(number)
                else:
                    char = self.token()
                    self.move(1)
                    return Undefined(char)
        else:
            return Eof()
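
# Hedged sketch (not in the original file): minimal stand-ins for the token
# classes that 'from types import *' is expected to provide, plus a usage
# loop. The real local module may differ; this is just enough to exercise
# the Tokenizer above.
class Token:
    def __init__(self, value=None):
        self.value = value
    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.value)

class Symbol(Token): pass
class Number(Token): pass
class OpenParen(Token): pass
class CloseParen(Token): pass
class Undefined(Token): pass
class Eof(Token): pass

if __name__ == '__main__':
    tokenizer = Tokenizer('12 + (34 * 5)')
    token = tokenizer.next()
    while not isinstance(token, Eof):
        print(token)  # Number(12), Symbol('+'), OpenParen(None), ...
        token = tokenizer.next()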
|
normal
|
{
"blob_id": "282bccf20cfb114e31c5465c110819796bf81bc0",
"index": 9318,
"step-1": "<mask token>\n\n\nclass Tokenizer:\n\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n <mask token>\n <mask token>\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n elif self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-2": "<mask token>\n\n\nclass Tokenizer:\n\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n <mask token>\n\n def move(self, value):\n self.index += value\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n elif self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-3": "<mask token>\n\n\nclass Tokenizer:\n\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n\n def token(self):\n return self.buf[self.index]\n\n def move(self, value):\n self.index += value\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n elif self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-4": "from types import *\n\n\nclass Tokenizer:\n\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n\n def token(self):\n return self.buf[self.index]\n\n def move(self, value):\n self.index += value\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n elif self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-5": "from types import *\n\nclass Tokenizer:\n def __init__(self, buf):\n self.buf = buf\n self.index = 0\n\n def token(self):\n return self.buf[self.index]\n\n def move(self, value):\n self.index += value\n\n def skip_whitespaces(self):\n while self.index < len(self.buf) and self.token().isspace():\n self.move(1)\n\n def next(self):\n self.skip_whitespaces()\n\n if self.index < len(self.buf):\n if self.token() == '+':\n self.move(1)\n return Symbol('+')\n elif self.token() == '-':\n self.move(1)\n return Symbol('-')\n elif self.token() == '*':\n self.move(1)\n return Symbol('*')\n elif self.token() == '/':\n self.move(1)\n return Symbol('/')\n elif self.token() == '(':\n self.move(1)\n return OpenParen()\n elif self.token() == ')':\n self.move(1)\n return CloseParen()\n else:\n if self.token().isnumeric():\n number = int(self.token())\n self.move(1)\n while self.index < len(self.buf) and self.token().isnumeric():\n number = number * 10 + int(self.token())\n self.move(1)\n return Number(number)\n else:\n char = self.token()\n self.move(1)\n return Undefined(char)\n else:\n return Eof()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from pymarketo.client import MarketoClientFactory
import os
import sys #@UnusedImport
import time #@UnusedImport
import datetime #@UnusedImport
from pprint import pprint #@UnresolvedImport
TESTDIR = os.path.split(__file__)[0]
PACKAGEDIR = os.path.join(TESTDIR,"..")
INIFILE = os.path.join(PACKAGEDIR,"marketo.ini")
DATAFILES=["specification","listMObjects"]
# The following must be set up on your marketo account to enable tests
LEADEMAIL = "[email protected]" # Email of an internal contact
LEADLIST = "2wr-0" # List name containing LEADEMAIL contact
SPECIALCODE = "WebReplyJobCode" # If your leads have a custom field that can be
SPECIALVALUE= "WEBREPLY" # asserted for LEADEMAIL, set them here
TESTCAMPAIGN = "SOAP API Access test" # Name of test campaign that has SOAP API trigger enabled
DELETECAMPAIGN = "Delete lead" # Campaign configure to delete leads added to the campaign
# First and last names, and synthetic email addresses for new leads
# These will be added and then deleted
TESTDOMAIN="webreply.com"
TESTNAMES = [("One","Test",TESTDOMAIN),("Two","Test",TESTDOMAIN)]
TESTEMAILS = ["%s.%s@%s" % name for name in TESTNAMES]
mc = MarketoClientFactory(INIFILE)
def compareData(datafile, data):
    path = os.path.join(TESTDIR,datafile+".txt")
    return open(path).read().strip() == data.strip()

def test_data():
    "Make sure that all the test data files are present"
    assert os.path.exists(INIFILE)
    for datafile in DATAFILES:
        assert os.path.exists(os.path.join(TESTDIR,datafile+".txt"))
# Factory methods to build structures for arguments
def aStringArray(strings):
    asa = mc.factory.create("ArrayOfString")
    asa.stringItem = strings
    return asa

def aLeadKey(email=None,id=None):
    leadkey = mc.factory.create("LeadKey")
    if email:
        leadkey.keyType = "EMAIL"
        leadkey.keyValue = email
    elif id:
        leadkey.keyType = "IDNUM"
        leadkey.keyValue = id
    return leadkey

def aLeadKeyArray(leads):
    lka = mc.factory.create("ArrayOfLeadKey")
    lka.leadKey = leads
    return lka

def aListKey(lk, keyType = "MKTOLISTNAME"):
    listkey = mc.factory.create("ListKey")
    listkey.keyType = keyType
    listkey.keyValue = lk
    return listkey

def anAttrib(**kwargs):
    attrib = mc.factory.create("Attrib")
    for key, value in kwargs.items():
        setattr(attrib, key, value)
    return attrib

def anAttribArray(attribs):
    aa = mc.factory.create("ArrayOfAttrib")
    aa.attrib = attribs
    return aa

def anAttribute(**kwargs):
    attrib = mc.factory.create("Attribute")
    for key, value in kwargs.items():
        setattr(attrib, key, value)
    return attrib

def anAttributeArray(attributes):
    aa = mc.factory.create("ArrayOfAttribute")
    aa.attribute = attributes
    return aa

def aLeadRecord(id=None, email=None, foreignsyspersonid=None,foreignsystype=None,attributes=None):
    lr = mc.factory.create("LeadRecord")
    if id:
        lr.Id = id
    elif email:
        lr.Email = email
    elif foreignsyspersonid:
        assert foreignsystype
        lr.ForeignSysPersonId = foreignsyspersonid
        lr.ForeignSysType = foreignsystype
    if attributes:
        lr.leadAttributeList = attributes
    return lr

def aLeadRecordArray(leadrecords):
    lra = mc.factory.create("ArrayOfLeadRecord")
    lra.leadRecord = leadrecords
    return lra

# Several things come back with an attribute list that is more pleasant as a dictionary
def attrs2dict(attributelist):
    if attributelist is None:
        return {}
    attributelist = attributelist[0]
    d = dict([(attr.attrName,attr.attrValue) for attr in attributelist])
    return d

def dict2attrs(d):
    al = []
    for key, value in d.items():
        al.append(anAttribute(attrName=key,attrValue=value))
    return anAttributeArray(al)
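
# Hedged illustration (not in the original tests): attrs2dict and dict2attrs
# are inverses over the suds attribute structures, so a round trip preserves
# the mapping. The field names here are examples, not values from a real
# lead; the function is defined but never called, since building Attribute
# objects needs a live mc factory.
def _roundtrip_example():
    attrs = dict2attrs({"FirstName": "Sam", "LastName": "Haggy"})
    assert attrs2dict([attrs.attribute]) == {"FirstName": "Sam", "LastName": "Haggy"}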
def test_specification():
    assert compareData("specification", str(mc))
# As of 1.7, these are the methods
# Untested: deleteCustomObjects(xs:string objTypeName, ArrayOfKeyList customObjKeyLists, )
# UnTested: deleteMObjects(ArrayOfMObject mObjectList, )
# Tested: describeMObject(xs:string objectName, )
# Requires having a trigger set for the campaign, from Marketo support:
# Your SOAP request is fine. In order for the getCampaignsForSource call to work,
# you must have a "Campaign is Requested" trigger in your campaign set to Web Service API.
# Tested: getCampaignsForSource(ReqCampSourceType source, xs:string name, xs:boolean exactName, )
# Untested: getCustomObjects(xs:string objTypeName, xs:string streamPosition, xs:int batchSize, ArrayOfAttribute customObjKeyList, ArrayOfString includeAttributes, )
# Tested: getLead(LeadKey leadKey, )
# Tested: getLeadActivity(LeadKey leadKey, ActivityTypeFilter activityFilter, StreamPosition startPosition, xs:int batchSize, )
# Tested: getLeadChanges(StreamPosition startPosition, ActivityTypeFilter activityFilter, xs:int batchSize, )
# getMObjects(xs:string type, xs:int id, Attrib externalKey, ArrayOfMObjCriteria mObjCriteriaList, ArrayOfMObjAssociation mObjAssociationList, xs:string streamPosition, )
# Tested: getMultipleLeads(xs:dateTime lastUpdatedAt, xs:string streamPosition, xs:int batchSize, ArrayOfString includeAttributes, )
# Tested: listMObjects()
# Tested: listOperation(ListOperationType listOperation, ListKey listKey, ArrayOfLeadKey listMemberList, xs:boolean strict, )
# mergeLeads(ArrayOfAttribute winningLeadKeyList, ArrayOfKeyList losingLeadKeyLists, )
# requestCampaign(ReqCampSourceType source, xs:int campaignId, ArrayOfLeadKey leadList, )
# syncCustomObjects(xs:string objTypeName, ArrayOfCustomObj customObjList, SyncOperationEnum operation, )
# Tested: syncLead(LeadRecord leadRecord, xs:boolean returnLead, xs:string marketoCookie, )
# Untested: syncMObjects(ArrayOfMObject mObjectList, SyncOperationEnum operation, )
# Tested: syncMultipleLeads(ArrayOfLeadRecord leadRecordList, xs:boolean dedupEnabled, )
# Campaign sources
# <xs:enumeration value="MKTOWS"/>
# <xs:enumeration value="SALES"/>
def test_getCampaignsForSource():
    print "Testing getCampaignsForSource"
    campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
    resultCount = campaigns.returnCount
    campaignrecords = campaigns.campaignRecordList[0]
    assert resultCount==len(campaignrecords), "Result count '%s' does not match campaign list '%s'" % (resultCount, len(campaignrecords))
    for campaign in campaignrecords:
        print campaign.id, campaign.name, campaign.description
    print

def test_getLead():
    print "Testing getLead"
    leadkey = aLeadKey(email=LEADEMAIL)
    lead = mc.service.getLead(leadkey)
    assert lead.count == 1
    lead = lead.leadRecordList.leadRecord[0]
    attrs = attrs2dict(lead.leadAttributeList)
    print lead.Id, lead.Email
    pprint(attrs)
    if SPECIALCODE and SPECIALVALUE:
        assert attrs[SPECIALCODE] == SPECIALVALUE
    print
# As of 1.7, these are the activity types
# <xs:enumeration value="VisitWebpage"/>
# <xs:enumeration value="FillOutForm"/>
# <xs:enumeration value="ClickLink"/>
# <xs:enumeration value="RegisterForEvent"/>
# <xs:enumeration value="AttendEvent"/>
# <xs:enumeration value="SendEmail"/>
# <xs:enumeration value="EmailDelivered"/>
# <xs:enumeration value="EmailBounced"/>
# <xs:enumeration value="UnsubscribeEmail"/>
# <xs:enumeration value="OpenEmail"/>
# <xs:enumeration value="ClickEmail"/>
# <xs:enumeration value="NewLead"/>
# <xs:enumeration value="ChangeDataValue"/>
# <xs:enumeration value="LeadAssigned"/>
# <xs:enumeration value="NewSFDCOpprtnty"/>
# <xs:enumeration value="Wait"/>
# <xs:enumeration value="RunSubflow"/>
# <xs:enumeration value="RemoveFromFlow"/>
# <xs:enumeration value="PushLeadToSales"/>
# <xs:enumeration value="CreateTask"/>
# <xs:enumeration value="ConvertLead"/>
# <xs:enumeration value="ChangeScore"/>
# <xs:enumeration value="ChangeOwner"/>
# <xs:enumeration value="AddToList"/>
# <xs:enumeration value="RemoveFromList"/>
# <xs:enumeration value="SFDCActivity"/>
# <xs:enumeration value="EmailBouncedSoft"/>
# <xs:enumeration value="PushLeadUpdatesToSales"/>
# <xs:enumeration value="DeleteLeadFromSales"/>
# <xs:enumeration value="SFDCActivityUpdated"/>
# <xs:enumeration value="SFDCMergeLeads"/>
# <xs:enumeration value="MergeLeads"/>
# <xs:enumeration value="ResolveConflicts"/>
# <xs:enumeration value="AssocWithOpprtntyInSales"/>
# <xs:enumeration value="DissocFromOpprtntyInSales"/>
# <xs:enumeration value="UpdateOpprtntyInSales"/>
# <xs:enumeration value="DeleteLead"/>
# <xs:enumeration value="SendAlert"/>
# <xs:enumeration value="SendSalesEmail"/>
# <xs:enumeration value="OpenSalesEmail"/>
# <xs:enumeration value="ClickSalesEmail"/>
# <xs:enumeration value="AddtoSFDCCampaign"/>
# <xs:enumeration value="RemoveFromSFDCCampaign"/>
# <xs:enumeration value="ChangeStatusInSFDCCampaign"/>
# <xs:enumeration value="ReceiveSalesEmail"/>
# <xs:enumeration value="InterestingMoment"/>
# <xs:enumeration value="RequestCampaign"/>
# <xs:enumeration value="SalesEmailBounced"/>
# <xs:enumeration value="ChangeLeadPartition"/>
# <xs:enumeration value="ChangeRevenueStage"/>
# <xs:enumeration value="ChangeRevenueStageManually"/>
# <xs:enumeration value="ComputeDataValue"/>
# <xs:enumeration value="ChangeStatusInProgression"/>
# <xs:enumeration value="ChangeFieldInProgram"/>
# <xs:enumeration value="EnrichWithJigsaw"/>
def test_getLeadActivity():
    print "Testing getLeadActivity"
    leadkey = aLeadKey(email=LEADEMAIL)
    activities = mc.service.getLeadActivity(leadkey,"")
    assert activities.returnCount > 0
    activityrecords = activities.activityRecordList[0]
    assert len(activityrecords) == activities.returnCount
    for activity in activityrecords:
        print "Activity", activity.activityDateTime,activity.activityType
        attrs = attrs2dict(activity.activityAttributes)
        pprint(attrs)
    print

def test_requestCampaign():
    print "Testing requestCampaign"
    campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
    campaignrecords = campaigns.campaignRecordList[0]
    campaignid = None
    for campaign in campaignrecords:
        if campaign.name == TESTCAMPAIGN:
            print "Found", campaign.id, campaign.name, campaign.description
            campaignid = campaign.id
            break
    assert campaignid != None
    leadkey = aLeadKey(email=LEADEMAIL)
    lead = mc.service.getLead(leadkey)
    assert lead.count == 1
    lead = lead.leadRecordList.leadRecord[0]
    leadid = lead.Id
    # requestCampaign appears to want the numeric ID key
    leadkey = aLeadKey(id=leadid)
    lka = aLeadKeyArray([leadkey])
    result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
    assert result.success
    print

def test_deleteLeads():
    # Depends on a campaign that deletes leads as they are added
    # We also need to know the IDNUM for the contacts
    lka = []
    for email in TESTEMAILS:
        leadkey = aLeadKey(email=email)
        lead = mc.service.getLead(leadkey)
        assert lead.count == 1
        lead = lead.leadRecordList.leadRecord[0]
        lka.append(aLeadKey(id=lead.Id))
        print "Found lead", lead.Id, lead.Email
    lka = aLeadKeyArray(lka)
    campaigns = mc.service.getCampaignsForSource("MKTOWS",None,False)
    campaignrecords = campaigns.campaignRecordList[0]
    campaignid = None
    for campaign in campaignrecords:
        if campaign.name == DELETECAMPAIGN:
            print "Found campaign", campaign.id, campaign.name, campaign.description
            campaignid = campaign.id
            break
    assert campaignid != None
    result = mc.service.requestCampaign("MKTOWS", campaignid, lka)
    print result

def test_getLeadChanges():
    print "Testing getLeadChanges"
    since = datetime.datetime(year=2010,month=1, day=1)
    changes = mc.service.getLeadChanges("",since,10)
    assert changes.returnCount == 10
    changerecords = changes.leadChangeRecordList[0]
    assert len(changerecords) == changes.returnCount
    for change in changerecords:
        print "leadChange", change.activityDateTime,change.activityType
        pprint(attrs2dict(change.activityAttributes))
    print
def test_getMultipleLeads():
    print "Testing getMultipleLeads"
    lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
    leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10)
    assert leads.returnCount == 10
    leadrecords = leads.leadRecordList[0]
    assert len(leadrecords) == 10
    for lead in leadrecords:
        attrs = attrs2dict(lead.leadAttributeList)
        print "Lead", lead.Id, lead.Email
        pprint(attrs)
    print

def test_getMultipleLeadsUnsubscribedFlag():
    print "Testing getMultipleLeadsUnsubscribedFlag"
    lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)
    attributelist = aStringArray(["Suppressed"])
    leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10, attributelist)
    assert leads.returnCount == 10
    leadrecords = leads.leadRecordList[0]
    assert len(leadrecords) == 10
    for lead in leadrecords:
        attrs = attrs2dict(lead.leadAttributeList)
        print "Lead", lead.Id, lead.Email
        pprint(attrs)
    print
# Valid list operations as of 1.7
# <xs:enumeration value="ADDTOLIST"/>
# <xs:enumeration value="ISMEMBEROFLIST"/>
# <xs:enumeration value="REMOVEFROMLIST"/>
# Valid list types
# <xs:enumeration value="MKTOLISTNAME"/>
# <xs:enumeration value="MKTOSALESUSERID"/>
# <xs:enumeration value="SFDCLEADOWNERID"/>
def test_listOperation():
    print "Testing listOperation"
    # Requires numeric id fields
    leadkey = aLeadKey(id=1256)  # Is a member
    leadkey2 = aLeadKey(id=1)  # Is not a member
    result = mc.service.listOperation("ISMEMBEROFLIST",aListKey(LEADLIST),
                                      aLeadKeyArray([leadkey,leadkey2]),True)
    print "listOperation", result

def test_syncLead():
    print "Testing syncLead"
    # This test performs a create only the first time it runs.
    # The name and email are used in the "standard" Marketo API examples
    attrs = dict(FirstName="Sam",LastName="Haggy")
    leadrecord = aLeadRecord(email="[email protected]",attributes=dict2attrs(attrs))
    result = mc.service.syncLead(leadrecord, True, None)
    print result.leadId, result.syncStatus.status

def test_syncMultipleLeads():
    print "Testing syncMultipleLeads"
    leadrecords = []
    for email, (firstname,lastname,domain) in zip(TESTEMAILS, TESTNAMES):
        leadrecord = aLeadRecord(email=email.lower(), attributes=dict2attrs(dict(FirstName=firstname,LastName=lastname)))
        leadrecords.append(leadrecord)
    lra = aLeadRecordArray(leadrecords)
    print lra
    result = mc.service.syncMultipleLeads(lra)
    print result
    print

def test_listMObjects():
    print "Testing listMObjects"
    mobjects = mc.service.listMObjects()
    assert compareData("listMObjects", str(mobjects))
    print

def test_describeMObject():
    print "Testing describeMObject"
    mobjects = ["ActivityRecord","LeadRecord","Opportunity","OpportunityPersonRole",]
    descriptions = []
    for mobject in mobjects:
        descriptions.append(str(mc.service.describeMObject(mobject)))
    descriptions = "\n".join(descriptions)
    assert compareData("describeMObjects", descriptions)
    print

if __name__ == "__main__":
    test_data()
    test_specification()
    test_getLead()
    test_getCampaignsForSource()
    test_requestCampaign()
    test_getLeadActivity()
    test_getLeadChanges()
    test_listMObjects()
    test_describeMObject()
    test_getLeadActivity()
    test_getMultipleLeads()
    test_getMultipleLeadsUnsubscribedFlag()
    test_listOperation()
    test_syncLead()
    test_syncMultipleLeads()
    test_deleteLeads()
    print "All is well"
|
normal
|
{
"blob_id": "b05a5fcbba74bf4108bc953c6f868eb1f5ca298f",
"index": 638,
"step-1": "from pymarketo.client import MarketoClientFactory\nimport os\nimport sys #@UnusedImport\nimport time #@UnusedImport\nimport datetime #@UnusedImport\nfrom pprint import pprint #@UnresolvedImport\n\nTESTDIR = os.path.split(__file__)[0]\nPACKAGEDIR = os.path.join(TESTDIR,\"..\")\nINIFILE = os.path.join(PACKAGEDIR,\"marketo.ini\")\nDATAFILES=[\"specification\",\"listMObjects\"]\n\n\n# The following must be set up on your marketo account to enable tests\nLEADEMAIL = \"[email protected]\" # Email of an internal contact\nLEADLIST = \"2wr-0\" # List name containing LEADEMAIL contact\nSPECIALCODE = \"WebReplyJobCode\" # If your leads have a custom field that can be\nSPECIALVALUE= \"WEBREPLY\" # asserted for LEADEMAIL, set them here\nTESTCAMPAIGN = \"SOAP API Access test\" # Name of test campaign that has SOAP API trigger enabled\nDELETECAMPAIGN = \"Delete lead\" # Campaign configure to delete leads added to the campaign\n\n# First and last names, and synthetic email addresses for new leads\n# These will be added and then deleted\nTESTDOMAIN=\"webreply.com\"\nTESTNAMES = [(\"One\",\"Test\",TESTDOMAIN),(\"Two\",\"Test\",TESTDOMAIN)]\nTESTEMAILS = [\"%s.%s@%s\" % name for name in TESTNAMES]\n\n\nmc = MarketoClientFactory(INIFILE)\n\ndef compareData(datafile, data):\n path = os.path.join(TESTDIR,datafile+\".txt\")\n return open(path).read().strip() == data.strip()\n\ndef test_data():\n \"Make sure that all the test data files are present\"\n assert os.path.exists(INIFILE)\n for datafile in DATAFILES:\n assert os.path.exists(os.path.join(TESTDIR,datafile+\".txt\"))\n \n# Factory methods to build structures for arguments\ndef aStringArray(strings):\n asa = mc.factory.create(\"ArrayOfString\")\n asa.stringItem = strings\n return asa\n\ndef aLeadKey(email=None,id=None):\n leadkey = mc.factory.create(\"LeadKey\")\n if email:\n leadkey.keyType = \"EMAIL\"\n leadkey.keyValue = email\n elif id:\n leadkey.keyType = \"IDNUM\"\n leadkey.keyValue = id\n return leadkey\n\ndef aLeadKeyArray(leads):\n lka = mc.factory.create(\"ArrayOfLeadKey\")\n lka.leadKey = leads\n return lka\n\ndef aListKey(lk, keyType = \"MKTOLISTNAME\"):\n listkey = mc.factory.create(\"ListKey\")\n listkey.keyType = keyType\n listkey.keyValue = lk\n return listkey\n\ndef anAttrib(**kwargs):\n attrib = mc.factory.create(\"Attrib\")\n for key, value in kwargs.items():\n setattr(attrib, key, value)\n return attrib\n\ndef anAttribArray(attribs):\n aa = mc.factory.create(\"ArrayOfAttrib\")\n aa.attrib=attribs\n return aa\n\ndef anAttribute(**kwargs):\n attrib = mc.factory.create(\"Attribute\")\n for key, value in kwargs.items():\n setattr(attrib, key, value)\n return attrib\n\ndef anAttributeArray(attributes):\n aa = mc.factory.create(\"ArrayOfAttribute\")\n aa.attribute=attributes\n return aa\n\ndef aLeadRecord(id=None, email=None, foreignsyspersonid=None,foreignsystype=None,attributes=None):\n lr = mc.factory.create(\"LeadRecord\")\n if id:\n lr.Id = id\n elif email:\n lr.Email = email\n elif foreignsyspersonid:\n assert foreignsystype\n lr.ForeignSysPersonId = foreignsyspersonid\n lr.ForeignSysType = foreignsystype\n if attributes:\n lr.leadAttributeList = attributes\n return lr\n\ndef aLeadRecordArray(leadrecords):\n lra = mc.factory.create(\"ArrayOfLeadRecord\")\n lra.leadRecord = leadrecords\n return lra\n\n# Several things come back with an attribute list that is more pleasant as a dictionary\ndef attrs2dict(attributelist):\n if attributelist is None:\n return {}\n attributelist = attributelist[0]\n d = 
dict([(attr.attrName,attr.attrValue) for attr in attributelist])\n return d\ndef dict2attrs(d):\n al = []\n for key, value in d.items():\n al.append(anAttribute(attrName=key,attrValue=value))\n return anAttributeArray(al)\n\ndef test_specification():\n compareData(\"specification\", str(mc))\n\n\n# As of 1.7, these are the methods \n# Untested: deleteCustomObjects(xs:string objTypeName, ArrayOfKeyList customObjKeyLists, )\n# UnTested: deleteMObjects(ArrayOfMObject mObjectList, )\n# Tested: describeMObject(xs:string objectName, )\n# Requires having a trigger set for the campaign, from Marketo support:\n# Your SOAP request is fine. In order for the getCampaignsForSource call to work, \n# you must have a \"Campaign is Requested\" trigger in the your campaign set to Web Service API.\n# Tested: getCampaignsForSource(ReqCampSourceType source, xs:string name, xs:boolean exactName, )\n# Untested: getCustomObjects(xs:string objTypeName, xs:string streamPosition, xs:int batchSize, ArrayOfAttribute customObjKeyList, ArrayOfString includeAttributes, )\n# Tested: getLead(LeadKey leadKey, )\n# Tested: getLeadActivity(LeadKey leadKey, ActivityTypeFilter activityFilter, StreamPosition startPosition, xs:int batchSize, )\n# Tested: getLeadChanges(StreamPosition startPosition, ActivityTypeFilter activityFilter, xs:int batchSize, )\n# getMObjects(xs:string type, xs:int id, Attrib externalKey, ArrayOfMObjCriteria mObjCriteriaList, ArrayOfMObjAssociation mObjAssociationList, xs:string streamPosition, )\n# Tested: getMultipleLeads(xs:dateTime lastUpdatedAt, xs:string streamPosition, xs:int batchSize, ArrayOfString includeAttributes, )\n# Tested: listMObjects()\n# Tested: listOperation(ListOperationType listOperation, ListKey listKey, ArrayOfLeadKey listMemberList, xs:boolean strict, )\n# mergeLeads(ArrayOfAttribute winningLeadKeyList, ArrayOfKeyList losingLeadKeyLists, )\n# requestCampaign(ReqCampSourceType source, xs:int campaignId, ArrayOfLeadKey leadList, )\n# syncCustomObjects(xs:string objTypeName, ArrayOfCustomObj customObjList, SyncOperationEnum operation, )\n# Tested: syncLead(LeadRecord leadRecord, xs:boolean returnLead, xs:string marketoCookie, )\n# Untested: syncMObjects(ArrayOfMObject mObjectList, SyncOperationEnum operation, )\n# Tested: syncMultipleLeads(ArrayOfLeadRecord leadRecordList, xs:boolean dedupEnabled, )\n\n# Campaign sources\n# <xs:enumeration value=\"MKTOWS\"/>\n# <xs:enumeration value=\"SALES\"/>\n\ndef test_getCampaignsForSource():\n print \"Testing getCampaignsForSource\"\n campaigns = mc.service.getCampaignsForSource(\"MKTOWS\",None,False)\n resultCount = campaigns.returnCount\n campaignrecords = campaigns.campaignRecordList[0]\n assert resultCount==len(campaignrecords), \"Result count '%s' does not match campaign list '%s'\" % (resultCount, len(campaigns))\n for campaign in campaignrecords:\n print campaign.id, campaign.name, campaign.description\n print\n\n\n\ndef test_getLead():\n print \"Testing getLead\"\n leadkey = aLeadKey(email=LEADEMAIL)\n lead = mc.service.getLead(leadkey)\n assert lead.count == 1\n lead = lead.leadRecordList.leadRecord[0]\n attrs = attrs2dict(lead.leadAttributeList)\n print lead.Id, lead.Email\n pprint(attrs)\n if SPECIALCODE and SPECIALVALUE:\n assert attrs[SPECIALCODE] == SPECIALVALUE\n print\n\n \n# As of 1.7, theses are the activity types\n# <xs:enumeration value=\"VisitWebpage\"/>\n# <xs:enumeration value=\"FillOutForm\"/>\n# <xs:enumeration value=\"ClickLink\"/>\n# <xs:enumeration value=\"RegisterForEvent\"/>\n# <xs:enumeration 
value=\"AttendEvent\"/>\n# <xs:enumeration value=\"SendEmail\"/>\n# <xs:enumeration value=\"EmailDelivered\"/>\n# <xs:enumeration value=\"EmailBounced\"/>\n# <xs:enumeration value=\"UnsubscribeEmail\"/>\n# <xs:enumeration value=\"OpenEmail\"/>\n# <xs:enumeration value=\"ClickEmail\"/>\n# <xs:enumeration value=\"NewLead\"/>\n# <xs:enumeration value=\"ChangeDataValue\"/>\n# <xs:enumeration value=\"LeadAssigned\"/>\n# <xs:enumeration value=\"NewSFDCOpprtnty\"/>\n# <xs:enumeration value=\"Wait\"/>\n# <xs:enumeration value=\"RunSubflow\"/>\n# <xs:enumeration value=\"RemoveFromFlow\"/>\n# <xs:enumeration value=\"PushLeadToSales\"/>\n# <xs:enumeration value=\"CreateTask\"/>\n# <xs:enumeration value=\"ConvertLead\"/>\n# <xs:enumeration value=\"ChangeScore\"/>\n# <xs:enumeration value=\"ChangeOwner\"/>\n# <xs:enumeration value=\"AddToList\"/>\n# <xs:enumeration value=\"RemoveFromList\"/>\n# <xs:enumeration value=\"SFDCActivity\"/>\n# <xs:enumeration value=\"EmailBouncedSoft\"/>\n# <xs:enumeration value=\"PushLeadUpdatesToSales\"/>\n# <xs:enumeration value=\"DeleteLeadFromSales\"/>\n# <xs:enumeration value=\"SFDCActivityUpdated\"/>\n# <xs:enumeration value=\"SFDCMergeLeads\"/>\n# <xs:enumeration value=\"MergeLeads\"/>\n# <xs:enumeration value=\"ResolveConflicts\"/>\n# <xs:enumeration value=\"AssocWithOpprtntyInSales\"/>\n# <xs:enumeration value=\"DissocFromOpprtntyInSales\"/>\n# <xs:enumeration value=\"UpdateOpprtntyInSales\"/>\n# <xs:enumeration value=\"DeleteLead\"/>\n# <xs:enumeration value=\"SendAlert\"/>\n# <xs:enumeration value=\"SendSalesEmail\"/>\n# <xs:enumeration value=\"OpenSalesEmail\"/>\n# <xs:enumeration value=\"ClickSalesEmail\"/>\n# <xs:enumeration value=\"AddtoSFDCCampaign\"/>\n# <xs:enumeration value=\"RemoveFromSFDCCampaign\"/>\n# <xs:enumeration value=\"ChangeStatusInSFDCCampaign\"/>\n# <xs:enumeration value=\"ReceiveSalesEmail\"/>\n# <xs:enumeration value=\"InterestingMoment\"/>\n# <xs:enumeration value=\"RequestCampaign\"/>\n# <xs:enumeration value=\"SalesEmailBounced\"/>\n# <xs:enumeration value=\"ChangeLeadPartition\"/>\n# <xs:enumeration value=\"ChangeRevenueStage\"/>\n# <xs:enumeration value=\"ChangeRevenueStageManually\"/>\n# <xs:enumeration value=\"ComputeDataValue\"/>\n# <xs:enumeration value=\"ChangeStatusInProgression\"/>\n# <xs:enumeration value=\"ChangeFieldInProgram\"/>\n# <xs:enumeration value=\"EnrichWithJigsaw\"/>\ndef test_getLeadActivity():\n print \"Testing getLeadActivity\"\n leadkey = aLeadKey(email=LEADEMAIL)\n activities = mc.service.getLeadActivity(leadkey,\"\")\n assert activities.returnCount > 0\n activityrecords = activities.activityRecordList[0]\n assert len(activityrecords) == activities.returnCount\n for activity in activityrecords:\n print \"Activity\", activity.activityDateTime,activity.activityType\n attrs = attrs2dict(activity.activityAttributes)\n pprint(attrs)\n print\n \ndef test_requestCampaign():\n print \"Testing requestCampaign\"\n campaigns = mc.service.getCampaignsForSource(\"MKTOWS\",None,False)\n campaignrecords = campaigns.campaignRecordList[0]\n campaignid = None\n for campaign in campaignrecords:\n if campaign.name == TESTCAMPAIGN:\n print \"Found\", campaign.id, campaign.name, campaign.description\n campaignid = campaign.id\n break\n assert campaignid != None\n leadkey = aLeadKey(email=LEADEMAIL)\n lead = mc.service.getLead(leadkey)\n assert lead.count == 1\n lead = lead.leadRecordList.leadRecord[0]\n leadid = lead.Id\n # Add key appears to want ID\n leadkey = aLeadKey(id=leadid)\n lka = aLeadKeyArray([leadkey])\n result = 
mc.service.requestCampaign(\"MKTOWS\", campaignid, lka) \n assert result.success\n print\n \ndef test_deleteLeads():\n # Depends on a campaign that deletes leads as they ar added\n # We also need to know the IDNUM for the contacts\n lka = []\n for email in TESTEMAILS:\n leadkey = aLeadKey(email=email)\n lead = mc.service.getLead(leadkey)\n assert lead.count == 1\n lead = lead.leadRecordList.leadRecord[0]\n lka.append(aLeadKey(id=lead.Id))\n print \"Found lead\", lead.Id, lead.Email\n lka = aLeadKeyArray(lka)\n campaigns = mc.service.getCampaignsForSource(\"MKTOWS\",None,False)\n campaignrecords = campaigns.campaignRecordList[0]\n campaignid = None\n for campaign in campaignrecords:\n if campaign.name == DELETECAMPAIGN:\n print \"Found campaign\", campaign.id, campaign.name, campaign.description\n campaignid = campaign.id\n break\n assert campaignid != None\n result = mc.service.requestCampaign(\"MKTOWS\", campaignid, lka)\n print result\n \ndef test_getLeadChanges():\n print \"Testing getLeadChanges\"\n since = datetime.datetime(year=2010,month=1, day=1)\n changes = mc.service.getLeadChanges(\"\",since,10)\n assert changes.returnCount == 10\n changerecords = changes.leadChangeRecordList[0]\n assert len(changerecords) == changes.returnCount\n for change in changerecords:\n print \"leadChange\", change.activityDateTime,change.activityType\n pprint(attrs2dict(change.activityAttributes))\n print\n\ndef test_getMultipleLeads():\n print \"Testing getMultipleLeads\"\n lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)\n leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10)\n assert leads.returnCount == 10\n leadrecords = leads.leadRecordList[0]\n assert len(leadrecords) == 10\n for lead in leadrecords:\n attrs = attrs2dict(lead.leadAttributeList)\n print \"Lead\", lead.Id, lead.Email\n pprint(attrs)\n print\n\ndef test_getMultipleLeadsUnsubscribedFlag():\n print \"Testing getMultipleLeadsUnsubscribedFlag\"\n lastUpdatedAt = datetime.datetime(year=2010,month=1, day=1)\n attributelist = aStringArray([\"Suppressed\"])\n leads = mc.service.getMultipleLeads(lastUpdatedAt,None,10, attributelist)\n assert leads.returnCount == 10\n leadrecords = leads.leadRecordList[0]\n assert len(leadrecords) == 10\n for lead in leadrecords:\n attrs = attrs2dict(lead.leadAttributeList)\n print \"Lead\", lead.Id, lead.Email\n pprint(attrs)\n print\n\n# Valid list operations as of 1.7\n# <xs:enumeration value=\"ADDTOLIST\"/>\n# <xs:enumeration value=\"ISMEMBEROFLIST\"/>\n# <xs:enumeration value=\"REMOVEFROMLIST\"/>\n\n# Valid list types\n# <xs:enumeration value=\"MKTOLISTNAME\"/>\n# <xs:enumeration value=\"MKTOSALESUSERID\"/>\n# <xs:enumeration value=\"SFDCLEADOWNERID\"/>\n\ndef test_listOperation():\n print \"Testing listOperation\"\n # Require numeric id fields\n leadkey = aLeadKey(id=1256) # Is member\n leadkey2 = aLeadKey(id=1) # Is not member\n result = mc.service.listOperation(\"ISMEMBEROFLIST\",aListKey(LEADLIST),\n aLeadKeyArray([leadkey,leadkey2]),True)\n print \"listOperation\", result\n \ndef test_syncLead():\n print \"Testing syncLead\"\n # This test does a create the first time only.\n # The name and email are used in the \"standard\" marketo API examples\n attrs = dict(FirstName=\"Sam\",LastName=\"Haggy\")\n leadrecord = aLeadRecord(email=\"[email protected]\",attributes=dict2attrs(attrs))\n result = mc.service.syncLead(leadrecord, True, None)\n print result.leadId, result.syncStatus.status\n \ndef test_syncMultipleLeads():\n print \"Testing syncMultipleLeads\" \n leadrecords = []\n for email, 
(firstname,lastname,domain) in zip(TESTEMAILS, TESTNAMES):\n leadrecord = aLeadRecord(email=email.lower(), attributes=dict2attrs(dict(FirstName=firstname,LastName=lastname)))\n leadrecords.append(leadrecord)\n lra = aLeadRecordArray(leadrecords)\n print lra\n result = mc.service.syncMultipleLeads(lra)\n print result\n print\n \ndef test_listMObjects():\n print \"Testing listMObjects\"\n mobjects = mc.service.listMObjects()\n compareData(\"listMObjects\", str(mobjects))\n print\n \ndef test_describeMObject():\n print \"Testing describeMObject\"\n mobjects = [\"ActivityRecord\",\"LeadRecord\",\"Opportunity\",\"OpportunityPersonRole\",]\n descriptions = []\n for mobject in mobjects:\n descriptions.append(str(mc.service.describeMObject(mobject)))\n descriptions = \"\\n\".join(descriptions)\n compareData(\"describeMObjects\", descriptions)\n print\n\n\nif __name__ == \"__main__\":\n test_data()\n test_specification()\n test_getLead()\n test_getCampaignsForSource() \n test_requestCampaign()\n test_getLeadActivity()\n test_getLeadChanges()\n test_listMObjects()\n test_describeMObject()\n test_getLeadActivity()\n test_getMultipleLeads()\n test_getMultipleLeadsUnsubscribedFlag()\n test_listOperation()\n test_syncLead()\n test_syncMultipleLeads()\n test_deleteLeads()\n print \"All is well\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.urls import path, include
from rest_framework.routers import SimpleRouter
from board_api.views import PostViewSet, UpvoteView, CommentViewSet
router = SimpleRouter()
router.register(r"post", PostViewSet)
router.register(r"post_upvote", UpvoteView)
router.register(r"comment", CommentViewSet)
urlpatterns = [
    path("", include(router.urls)),
]
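
# Aside (not in the original module): with these registrations, SimpleRouter
# generates the conventional DRF route pairs, roughly:
#   post/         -> PostViewSet    (list, create)
#   post/<pk>/    -> PostViewSet    (retrieve, update, partial_update, destroy)
#   post_upvote/  -> UpvoteView     (same pattern)
#   comment/      -> CommentViewSet (same pattern)
# The exact set depends on which actions each viewset actually implements;
# the route table can be inspected via router.urls.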
|
normal
|
{
"blob_id": "db309283137383cd698f235e7326c6e5c50f6cf3",
"index": 6671,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrouter.register('post', PostViewSet)\nrouter.register('post_upvote', UpvoteView)\nrouter.register('comment', CommentViewSet)\n<mask token>\n",
"step-3": "<mask token>\nrouter = SimpleRouter()\nrouter.register('post', PostViewSet)\nrouter.register('post_upvote', UpvoteView)\nrouter.register('comment', CommentViewSet)\nurlpatterns = [path('', include(router.urls))]\n",
"step-4": "from django.urls import path, include\nfrom rest_framework.routers import SimpleRouter\nfrom board_api.views import PostViewSet, UpvoteView, CommentViewSet\nrouter = SimpleRouter()\nrouter.register('post', PostViewSet)\nrouter.register('post_upvote', UpvoteView)\nrouter.register('comment', CommentViewSet)\nurlpatterns = [path('', include(router.urls))]\n",
"step-5": "from django.urls import path, include\nfrom rest_framework.routers import SimpleRouter\n\nfrom board_api.views import PostViewSet, UpvoteView, CommentViewSet\n\nrouter = SimpleRouter()\n\nrouter.register(r\"post\", PostViewSet)\nrouter.register(r\"post_upvote\", UpvoteView)\nrouter.register(r\"comment\", CommentViewSet)\n\nurlpatterns = [\n path(\"\", include(router.urls)),\n]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Settings module for test app."""
ENV = "development"
TESTING = True
SQLALCHEMY_DATABASE_URI = "sqlite://"
SECRET_KEY = "not-so-secret-in-tests"
DEBUG_TB_ENABLED = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
APP_ENV = "testing"
JWT_SECRET_KEY = (
    "-----BEGIN RSA PRIVATE KEY-----\n"
    "MIICWwIBAAKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJi"
    "bXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a0"
    "3GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQABAoGAD+onAtVye4i"
    "c7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS3MCyjjX2eMhu/aF5YhXB"
    "wkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5CpuGb1esyPbYW+Ty2PC0GSZfIXk"
    "Xs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE2+yo382CLJdrlSLVROWKwb4tb2PjhY"
    "4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0GAtFr833AkEA6avx20OHo61Yela/4k5kQDt"
    "jEf1N0LfI+BcWZtxsS3jDM3i1Hp0KSu5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2X"
    "IpsitLyPpuiMOvBbzPavd4gY6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWi"
    "z+6zLoX1dbOZwJACmH5fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT"
    "/SySDOxQ4G/523Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxr"
    "E9MNUZ2aPFaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw=="
    "\n-----END RSA PRIVATE KEY-----"
)

JWT_PUBLIC_KEY = (
    "-----BEGIN PUBLIC KEY-----\n"
    "MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9"
    "iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+M"
    "uSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRg"
    "EKwIDAQAB"
    "\n-----END PUBLIC KEY-----"
)
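
# Hedged sketch (not in the original settings): one way these keys could be
# exercised is an RS256 sign/verify round trip with PyJWT (requires the
# 'cryptography' package). Whether this app actually uses PyJWT is an
# assumption; only the key material comes from this module. Kept in comments
# so importing the settings stays side-effect free:
#
#   import jwt
#   token = jwt.encode({"sub": "test-user"}, JWT_SECRET_KEY, algorithm="RS256")
#   claims = jwt.decode(token, JWT_PUBLIC_KEY, algorithms=["RS256"])
#   assert claims["sub"] == "test-user"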
|
normal
|
{
"blob_id": "909ea7b9335a858662f83abc71b4d58578bd0850",
"index": 8261,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nENV = 'development'\nTESTING = True\nSQLALCHEMY_DATABASE_URI = 'sqlite://'\nSECRET_KEY = 'not-so-secret-in-tests'\nDEBUG_TB_ENABLED = False\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nAPP_ENV = 'testing'\nJWT_SECRET_KEY = \"\"\"-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQABAoGAD+onAtVye4ic7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS3MCyjjX2eMhu/aF5YhXBwkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5CpuGb1esyPbYW+Ty2PC0GSZfIXkXs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE2+yo382CLJdrlSLVROWKwb4tb2PjhY4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0GAtFr833AkEA6avx20OHo61Yela/4k5kQDtjEf1N0LfI+BcWZtxsS3jDM3i1Hp0KSu5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2XIpsitLyPpuiMOvBbzPavd4gY6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWiz+6zLoX1dbOZwJACmH5fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT/SySDOxQ4G/523Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxrE9MNUZ2aPFaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw==\n-----END RSA PRIVATE KEY-----\"\"\"\nJWT_PUBLIC_KEY = \"\"\"-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB\n-----END PUBLIC KEY-----\"\"\"\n",
"step-3": "\"\"\"Settings module for test app.\"\"\"\nENV = \"development\"\nTESTING = True\nSQLALCHEMY_DATABASE_URI = \"sqlite://\"\nSECRET_KEY = \"not-so-secret-in-tests\"\nDEBUG_TB_ENABLED = False\nSQLALCHEMY_TRACK_MODIFICATIONS = False\n\nAPP_ENV = \"testing\"\n\n\nJWT_SECRET_KEY = (\n \"-----BEGIN RSA PRIVATE KEY-----\\n\"\n \"MIICWwIBAAKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJi\"\n \"bXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a0\"\n \"3GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQABAoGAD+onAtVye4i\"\n \"c7VR7V50DF9bOnwRwNXrARcDhq9LWNRrRGElESYYTQ6EbatXS3MCyjjX2eMhu/aF5YhXB\"\n \"wkppwxg+EOmXeh+MzL7Zh284OuPbkglAaGhV9bb6/5CpuGb1esyPbYW+Ty2PC0GSZfIXk\"\n \"Xs76jXAu9TOBvD0ybc2YlkCQQDywg2R/7t3Q2OE2+yo382CLJdrlSLVROWKwb4tb2PjhY\"\n \"4XAwV8d1vy0RenxTB+K5Mu57uVSTHtrMK0GAtFr833AkEA6avx20OHo61Yela/4k5kQDt\"\n \"jEf1N0LfI+BcWZtxsS3jDM3i1Hp0KSu5rsCPb8acJo5RO26gGVrfAsDcIXKC+bQJAZZ2X\"\n \"IpsitLyPpuiMOvBbzPavd4gY6Z8KWrfYzJoI/Q9FuBo6rKwl4BFoToD7WIUS+hpkagwWi\"\n \"z+6zLoX1dbOZwJACmH5fSSjAkLRi54PKJ8TFUeOP15h9sQzydI8zJU+upvDEKZsZc/UhT\"\n \"/SySDOxQ4G/523Y0sz/OZtSWcol/UMgQJALesy++GdvoIDLfJX5GBQpuFgFenRiRDabxr\"\n \"E9MNUZ2aPFaFp+DyAe+b4nDwuJaW2LURbr8AEZga7oQj0uYxcYw==\"\n \"\\n-----END RSA PRIVATE KEY-----\"\n)\n\nJWT_PUBLIC_KEY = (\n \"-----BEGIN PUBLIC KEY-----\\n\"\n \"MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3WojgGHFHYLugdUWAY9\"\n \"iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlvdbH6dvEOfou0/gCFQsHUfQrSDv+M\"\n \"uSUMAe8jzKE4qW+jK+xQU9a03GUnKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRg\"\n \"EKwIDAQAB\"\n \"\\n-----END PUBLIC KEY-----\"\n)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#%%
import numpy as np
import cv2
import matplotlib.pyplot as plt
import win32gui,win32ui,win32con,win32api
import pyautogui as pg
from PIL import ImageGrab
import time
import pandas as pd
# %%
def get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):
    # '炉石传说' is the Chinese window title of Hearthstone
    handle_of_hearthstone = win32gui.FindWindow(lpClassName, lpWindowName)
    return win32gui.GetClientRect(handle_of_hearthstone)

def countdown(n):
    for i in np.arange(n, 0, -1):
        print(i)
        time.sleep(1)

# During the 5-second countdown, move the mouse to the game window's
# top-left corner; its position anchors the client rect in screen coordinates.
countdown(5)
corner = pg.position()
window = np.array(get_window())
window[:2] += corner
window[2:] += corner
window = tuple(window)

def currentmouse():
    return pg.position()

def get_pic():
    return np.array(ImageGrab.grab(window))

def closewindow():
    cv2.waitKey(0)
    cv2.destroyAllWindows()

#%%
# UI states: main menu, deck selection, battle, collection, search,
# mulligan, battle result
states_str = ['主界面', '选牌界面', '战斗界面', '收藏界面', '搜索界面', '手牌更换', '战斗结果']
states_num = [0, 1, 2, 3, 4, 5, 6]
states = pd.DataFrame(states_str, index=states_num)
print(states)
count = np.load('count.npy')
while True:
    pic = get_pic()
    cv2.imshow('output', pic)
    key = chr(cv2.waitKey(0))
    cv2.destroyAllWindows()
    if key == 'q':  # quit
        break
    elif key == 'd':  # discard this frame
        countdown(5)
    else:
        count += 1
        plt.imsave('./dataset/{}_{}.png'.format(key, count[0]), pic)
        countdown(5)
np.save('count.npy', count)
#%% Collect button positions
if False:
    countdown(5)
    print(pg.position())
    print(np.array(get_window()))
    print(np.array(pg.position()) - corner)
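#%%
# Hedged note (not in the original script): the labelling loop above requires
# a pre-existing count.npy and a ./dataset/ directory. A one-time setup,
# assuming count is a length-1 integer array as implied by count[0]:
#
#   import os
#   import numpy as np
#   os.makedirs('dataset', exist_ok=True)
#   np.save('count.npy', np.array([0]))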
|
normal
|
{
"blob_id": "e36d2426fb8a268ab9ff4f3d6135aa72697e6326",
"index": 1505,
"step-1": "<mask token>\n\n\ndef get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):\n handle_of_hearthstone = win32gui.FindWindow(lpClassName, lpWindowName)\n return win32gui.GetClientRect(handle_of_hearthstone)\n\n\ndef countdown(n):\n for i in np.arange(n, 0, -1):\n print(i)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef currentmouse():\n return pg.position()\n\n\ndef get_pic():\n return np.array(ImageGrab.grab(window))\n\n\ndef closewindow():\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):\n handle_of_hearthstone = win32gui.FindWindow(lpClassName, lpWindowName)\n return win32gui.GetClientRect(handle_of_hearthstone)\n\n\ndef countdown(n):\n for i in np.arange(n, 0, -1):\n print(i)\n time.sleep(1)\n\n\ncountdown(5)\n<mask token>\nwindow[:2] += corner\nwindow[2:] += corner\n<mask token>\n\n\ndef currentmouse():\n return pg.position()\n\n\ndef get_pic():\n return np.array(ImageGrab.grab(window))\n\n\ndef closewindow():\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n<mask token>\nprint(states)\n<mask token>\nwhile True:\n pic = get_pic()\n cv2.imshow('output', pic)\n key = chr(cv2.waitKey(0))\n cv2.destroyAllWindows()\n if key == 'q':\n break\n elif key == 'd':\n pass\n countdown(5)\n else:\n count += 1\n plt.imsave('./dataset/{}_{}.png'.format(key, count[0]), pic)\n countdown(5)\nnp.save('count.npy', count)\nif False:\n countdown(5)\n print(pg.position())\n print(np.array(get_window()))\n print(np.array(pg.position()) - corner)\n",
"step-3": "<mask token>\n\n\ndef get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):\n handle_of_hearthstone = win32gui.FindWindow(lpClassName, lpWindowName)\n return win32gui.GetClientRect(handle_of_hearthstone)\n\n\ndef countdown(n):\n for i in np.arange(n, 0, -1):\n print(i)\n time.sleep(1)\n\n\ncountdown(5)\ncorner = pg.position()\nwindow = np.array(get_window())\nwindow[:2] += corner\nwindow[2:] += corner\nwindow = tuple(window)\n\n\ndef currentmouse():\n return pg.position()\n\n\ndef get_pic():\n return np.array(ImageGrab.grab(window))\n\n\ndef closewindow():\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nstates_str = ['主界面', '选牌界面', '战斗界面', '收藏界面', '搜索界面', '手牌更换', '战斗结果']\nstates_num = [0, 1, 2, 3, 4, 5, 6]\nstates = pd.DataFrame(states_str, index=states_num)\nprint(states)\ncount = np.load('count.npy')\nwhile True:\n pic = get_pic()\n cv2.imshow('output', pic)\n key = chr(cv2.waitKey(0))\n cv2.destroyAllWindows()\n if key == 'q':\n break\n elif key == 'd':\n pass\n countdown(5)\n else:\n count += 1\n plt.imsave('./dataset/{}_{}.png'.format(key, count[0]), pic)\n countdown(5)\nnp.save('count.npy', count)\nif False:\n countdown(5)\n print(pg.position())\n print(np.array(get_window()))\n print(np.array(pg.position()) - corner)\n",
"step-4": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport win32gui, win32ui, win32con, win32api\nimport pyautogui as pg\nfrom PIL import ImageGrab\nimport time\nimport pandas as pd\n\n\ndef get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):\n handle_of_hearthstone = win32gui.FindWindow(lpClassName, lpWindowName)\n return win32gui.GetClientRect(handle_of_hearthstone)\n\n\ndef countdown(n):\n for i in np.arange(n, 0, -1):\n print(i)\n time.sleep(1)\n\n\ncountdown(5)\ncorner = pg.position()\nwindow = np.array(get_window())\nwindow[:2] += corner\nwindow[2:] += corner\nwindow = tuple(window)\n\n\ndef currentmouse():\n return pg.position()\n\n\ndef get_pic():\n return np.array(ImageGrab.grab(window))\n\n\ndef closewindow():\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\nstates_str = ['主界面', '选牌界面', '战斗界面', '收藏界面', '搜索界面', '手牌更换', '战斗结果']\nstates_num = [0, 1, 2, 3, 4, 5, 6]\nstates = pd.DataFrame(states_str, index=states_num)\nprint(states)\ncount = np.load('count.npy')\nwhile True:\n pic = get_pic()\n cv2.imshow('output', pic)\n key = chr(cv2.waitKey(0))\n cv2.destroyAllWindows()\n if key == 'q':\n break\n elif key == 'd':\n pass\n countdown(5)\n else:\n count += 1\n plt.imsave('./dataset/{}_{}.png'.format(key, count[0]), pic)\n countdown(5)\nnp.save('count.npy', count)\nif False:\n countdown(5)\n print(pg.position())\n print(np.array(get_window()))\n print(np.array(pg.position()) - corner)\n",
"step-5": "#%%\r\nimport numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimport win32gui,win32ui,win32con,win32api \r\nimport pyautogui as pg\r\nfrom PIL import ImageGrab\r\nimport time\r\nimport pandas as pd\r\n\r\n# %%\r\ndef get_window(lpClassName='UnityWndClass', lpWindowName='炉石传说'):\r\n handle_of_hearthstone=win32gui.FindWindow(lpClassName,lpWindowName)\r\n return win32gui.GetClientRect(handle_of_hearthstone)\r\ndef countdown(n):\r\n for i in np.arange(n,0,-1):\r\n print(i)\r\n time.sleep(1)\r\ncountdown(5)\r\ncorner=pg.position()\r\nwindow=np.array(get_window())\r\nwindow[:2]+=corner\r\nwindow[2:]+=corner\r\nwindow=tuple(window)\r\ndef currentmouse():\r\n return pg.position()\r\ndef get_pic():\r\n return np.array(ImageGrab.grab(window))\r\ndef closewindow():\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n\r\n#%%\r\nstates_str=['主界面','选牌界面','战斗界面','收藏界面','搜索界面','手牌更换','战斗结果']\r\nstates_num=[0,1,2,3,4,5,6]\r\nstates=pd.DataFrame(states_str,index=states_num)\r\nprint(states)\r\ncount=np.load('count.npy')\r\nwhile(True):\r\n pic=get_pic()\r\n cv2.imshow('output',pic)\r\n key=chr(cv2.waitKey(0))\r\n cv2.destroyAllWindows()\r\n if key=='q':#quit\r\n break\r\n elif key=='d':#discard\r\n pass\r\n countdown(5)\r\n else:\r\n count+=1\r\n plt.imsave('./dataset/{}_{}.png'.format(key,count[0]),pic)\r\n countdown(5)\r\nnp.save('count.npy',count)\r\n\r\n#%% 收集按钮位置\r\nif False:\r\n countdown(5)\r\n print(pg.position())\r\n print(np.array(get_window()))\r\n print(np.array(pg.position())-corner)",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
# -*- coding: utf-8 -*-
from django.http import Http404
from django.shortcuts import render,render_to_response, get_object_or_404, redirect, HttpResponse
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.template import RequestContext,Context
from django.template.loader import get_template
import os, sys
from newchama import settings
from newchama.helper import member_login_required
from xhtml2pdf import pisa
import StringIO
import datetime
from services.models import Deal,CompanyWithPE,Demand, Country, Industry, Member, Company, StatusDemand, Province, City, DemandAttach, DemandIndustry, InvestmentCompany, DemandOtherTargetCompany, ListedCompany
from services.models import DemandViewLog, Message, DemandViewLog, Favorites, TypeFavorite, DemandVisitor, Project,News, StatusProject,Preference, PreferenceIndustry, PreferenceLocation, PreferenceKeyword
from services.models import ConditionDemand, DemandKeyword, DemandKeywordEn
from services.helper import Helper
from recommond.views import update_project_recommond_list
from recommond.models import RecommondProjectItem
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
import logging
import random
import zipfile
from django.db.models import Q, Sum, connection
from sets import Set as set
from django.utils import simplejson
from log.views import *
logger = logging.getLogger(__name__)
@member_login_required
def addsuccess(request):
c = {}
c['title'] = _("Buyer Recommended")
c['member'] = request.session.get('member', None)
return render_to_response("purchase/"+request.lang+"/add_success.html", c, context_instance=RequestContext(request))
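# A filter value counts as set only when it is non-empty and not the "0"
# placeholder of an unselected dropdown.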
def _is_has_condition(condition):
return condition != "" and condition != "0"
@member_login_required
def search(request):
c = {}
c.update(request)
c['title'] = _("Purchase Search")
c['member'] = request.session.get('member', None)
member_id = request.session['member']['id']
is_search = request.GET.get("is_search", "")
if is_search == '1':
is_has_condition = False
condition = ConditionDemand()
condition.status = StatusDemand.approved
demands = Demand.objects.filter(status=StatusDemand.approved)
keyword = request.GET.get("keyword", "")
country = request.GET.get("country", "")
#province = request.GET.get("province", "")
#city = request.GET.get("city", "")
type = request.GET.get("type", "")
industry_first = request.GET.get("industry_first", "")
industry_second = request.GET.get("industry_second", "")
industry_third = request.GET.get("industry_third", "")
if keyword != "":
c["keyword"] = keyword
is_has_condition = True
if type != "":
c["type"] = int(type)
is_has_condition = True
country_id = 0
#province_id = 0
#city_id = 0
if _is_has_condition(country):
country_id = int(country)
c["country"] = country_id
is_has_condition = True
industry = ""
if _is_has_condition(industry_first):
industry = industry_first
c["industry_first"] = int(industry_first)
if _is_has_condition(industry_second):
industry = industry_second
c["industry_second"] = int(industry_second)
if _is_has_condition(industry_third):
industry = industry_third
c["industry_third"] = int(industry_third)
if industry != "":
industry_condition = Industry.objects.get(pk=industry)
condition.industry = industry_condition
is_has_condition = True
condition.country_id = country_id
condition.keyword = keyword
condition.type = type
sort = "time_desc"
if is_has_condition:
data, total = find_demands(condition, 1, 5, sort)
c['has_more'] = total > 5
c['demands'] = data
c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
c['is_search'] = True
c['is_has_condition'] = is_has_condition
c["SERVICE_TYPES"] = Demand.SERVICE_TYPES
c["countries"] = Helper.find_countries()
c["industries"] = Helper.find_industries_level1()
return render_to_response("purchase/"+request.lang+"/search.html", c, context_instance=RequestContext(request))
@member_login_required
def search_keyword(request):
c = {}
c['title'] = _("Search")
keyword = request.GET.get("keyword", '')
c['member'] = request.session.get('member', None)
member_id = request.session['member']['id']
member = get_object_or_404(Member,pk=member_id)
c["keyword"] = keyword
demands = Demand.objects.filter(Q(status=StatusDemand.approved) & (Q(name_cn__contains=keyword) | Q(name_en__contains=keyword)))
c['demands'] = demands[0:5]
c['total_project'] = Project.objects.filter(Q(status=StatusProject.approved) & (Q(name_cn__contains=keyword) | Q(name_en__contains=keyword))).count()
c['total_demand'] = demands.count()
c['total_news'] = News.objects.filter(Q(title__contains=keyword) | Q(tag__contains=keyword)).count()
c['total_company'] = Company.objects.filter(Q(short_name_cn__contains=keyword) | Q(short_name_en__contains=keyword)).exclude(id=27).count()
c['total_member'] = Member.objects.filter(Q(last_name__contains=keyword) | Q(first_name__contains=keyword)).count()
c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
write_search_demand_log(request,member,keyword)
return render_to_response("purchase/"+request.lang+"/search_keyword.html", c, context_instance=RequestContext(request))
@member_login_required
def new(request):
c = {}
c['title'] = _("New Purchase")
c['member'] = request.session.get('member', None)
# member_id = request.session['member']['id']
# demands = Demand.objects.filter(status=StatusDemand.approved)
# sort = request.GET.get("sort", "time_desc")
# c['sort'] = sort
# c[sort] = "active"
# if sort == "time_desc":
# demands = demands.order_by("-id")
# elif sort == "time_asc":
# demands = demands.order_by("id")
# check the preference is setting
# pi = PreferenceIndustry.objects.filter(preference__member__id=member_id, preference__title="demand")
# pn = PreferenceKeyword.objects.filter(preference__member__id=member_id, preference__title="demand")
# pl = PreferenceLocation.objects.filter(preference__member__id=member_id, preference__title="demand")
# if len(pi) == 0 and len(pl) == 0 and len(pn) == 0:
# c['need_preference'] = True
# c['demands'] = demands[0:50]
# else:
# c['need_preference'] = False
# c['demands'] = demands[0:10]
# check finish
member_id = request.session['member']['id']
type = request.GET.get('type', 0)
keyword = request.GET.get('keywords', '')
country_id = request.GET.get('country_id', 0)
province_id = request.GET.get('province_id', 0)
industry_id = request.GET.get('industry_id', 0)
sort = request.GET.get('sort', 'time_desc')
condition = ConditionDemand()
condition.country_id = country_id
condition.keyword = keyword
condition.status = StatusDemand.approved
condition.province_id = province_id
condition.type = type
level = 1
if industry_id != "" and industry_id != "0" and industry_id != 0:
condition.industry = Industry.objects.get(pk=industry_id)
level = condition.industry.level
pagesize = 10
data, total = find_demands(condition, 1, pagesize, sort)
c["have_more_data"] = len(data) == int(pagesize)
c['demands'] = data
c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
c['countries'] = Helper.find_countries()
c['industries'] = Helper.find_industries_level1()
c['total'] = total
return render_to_response("purchase/"+request.lang+"/new.html", c, context_instance=RequestContext(request))
@member_login_required
def banking_genius(request, id_post):
c = {}
c['title'] = _("New Purchase")
c['member'] = request.session.get('member', None)
try:
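        # Initial render: only the first batch is fetched here; subsequent
        # pages presumably come from the json_recommend AJAX endpoint below.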
page = request.GET.get('page', 0)
pagesize = request.GET.get('pagesize', 10)
condition = recommendCondiction(request, id_post)
c["totalRecommend"] = RecommondProjectItem.objects.filter(condition).count()
c["recommendList"] = list(RecommondProjectItem.objects.filter(condition).order_by('-id'))[page : pagesize]
except Exception, e:
logger.error(e.message)
c["id"] = id_post
c["countries"] = Helper.find_countries()
c["industries"] = Helper.find_industries_level1()
c["project_title"] = Demand.objects.get(pk=id_post).name_cn
member_id = request.session['member']['id']
c['favorites_project_ids'] = Helper.find_member_favorite_project_ids(member_id)
return render_to_response("purchase/"+request.lang+"/banking_genius.html", c, context_instance=RequestContext(request))
@member_login_required
def json_recommend(request):
c = {}
id_post = request.POST.get("id", False)
if request.method == "POST":
try:
page = request.POST.get('page', 1)
pagesize = request.POST.get('pagesize', 10)
if page <= 1:
page = 1
if pagesize <= 1:
pagesize = 1
start_record = (int(page)-1) * int(pagesize)
end_record = int(start_record) + int(pagesize)
if id_post:
condition = recommendCondiction(request, id_post)
c["recommendList"] = list(RecommondProjectItem.objects.filter(condition).order_by('-id'))[start_record : end_record]
member_id = request.session['member']['id']
c['favorites_project_ids'] = Helper.find_member_favorite_project_ids(member_id)
except Exception, e:
            logger.error(e.message)
return render_to_response("purchase/"+request.lang+"/json_recommend.html", c, context_instance=RequestContext(request))
@member_login_required
def json_recommend_count(request):
count = 0
id_post = request.POST.get("id", False)
if request.method == "POST":
try:
if id_post:
condition = recommendCondiction(request, id_post)
count = RecommondProjectItem.objects.filter(condition).count()
print count
except Exception, e:
            logger.error(e.message)
return HttpResponse(count)
@member_login_required
def sync_recommond(request):
result = "success"
try:
if request.method == "POST":
id_post = request.POST.get("id", False)
if id_post:
d = Demand.objects.get(pk=id_post)
project_list=Project.objects.filter(status=2).filter(expire_date__gt=datetime.datetime.now()).order_by('-id')
update_project_recommond_list(d, project_list)
result = "success"
except Exception, e:
print e.message
logger.error(e.message)
return HttpResponse(result)
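# Build the Q filter used to narrow recommended projects for a demand: start
# from the demand id, then OR together any posted location/industry filters.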
def recommendCondiction(request, id):
condition = Q(demand_id=id, is_delete=False)
condition2 = Q()
target_location_id = request.POST.get('target_location_id', 0)
target_industry_id = request.POST.get('target_industry_id', 0)
target_location_type = request.POST.get('target_location_type', 0)
    if target_location_id != "0" and target_location_id != 0:
        if target_location_type == "province":
            condition2 = condition2 | Q(company_province=target_location_id)
        else:
            condition2 = condition2 | Q(company_country=target_location_id)
    if target_industry_id != "0" and target_industry_id != 0:
        condition2 = condition2 | Q(company_industry=target_industry_id)
    if target_location_id != "0" or target_industry_id != "0":
        p = Project.objects.filter(condition2)
        condition = condition & Q(project=p)
return condition
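# Return up to three approved demands matching the member's saved "demand"
# preference; industry and country filters are OR-ed together.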
def preferenceByMemberId(c, member_id):
list = []
preferences = Preference.objects.filter(member_id=member_id, title="demand")[0: 1]
condition = Q(status=StatusDemand.approved)
if len(preferences) > 0:
condition2 = Q()
p = preferences[0]
c['preference_demand_id'] = p.id
preference_project_industries = p.preference_industry.all() #PreferenceIndustry.objects.filter(preference__member__id=member['id'])
c['pre_demand_indusrtis'] = preference_project_industries
        if len(preference_project_industries) > 0:
            for ppi in preference_project_industries:
                condition2 = condition2 | Q(company_industries=ppi.industry_id)
        preference_project_location = p.preference_location.all()
        c['pre_demand_locations'] = preference_project_location
        if len(preference_project_location):
            for ppl in preference_project_location:
                condition2 = condition2 | Q(company_countries=ppl.country_id)
condition = condition & condition2
list = Demand.objects.filter(condition).order_by("-id").distinct()[0: 3]
return list
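# For the member's five newest approved demands, attach unread-message,
# favourite, matching-company and recent-deal counts for the dashboard.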
def demandByMemberId(member_Id):
demands = Demand.objects.filter(member_id=member_Id, status=StatusDemand.approved).order_by("-id")[0: 5]
list_demand = []
for demand in demands:
count_message = Message.objects.filter(type_relation=2, demand=demand.id, is_read=0).count()
count_favor = Favorites.objects.filter(type_relation=1, demand=demand.id).count()
company_industries = demand.company_industries.all()
count_company = 0
count_industry = 0
if company_industries:
industry_ids = []
industry_level_1_id = []
for c in company_industries:
industry_ids.append(c.id)
industry_level = c.level
industry_id = c.id
if industry_level == 2:
industry_id = c.father_id
elif industry_level == 3:
industry_id = c.father.father_id
industry_level_1_id.append(industry_id)
count_company = Company.objects.filter(industry__in=industry_ids, status=1).exclude(id=27).count()
#start_date = datetime.date(datetime.datetime.today().year, datetime.datetime.today().month - 3, datetime.datetime.today().day)
start_date = datetime.datetime.today()-datetime.timedelta(days=90)
count_industry = Deal.objects.filter(cv1__in=industry_level_1_id, happen_date__gt=start_date).count()
pro = {}
pro["demand"] = demand
pro["count_message"] = count_message
pro["count_favor"] = count_favor
pro["count_industry"] = count_industry
pro["count_company"] = count_company
list_demand.append(pro)
return list_demand
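# Aggregate dashboard totals (page views, messages, favourites, recommended
# companies and recent industry deals) across the member's approved demands.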
def countDemandStuffTotal(member_id):
pvs = Demand.objects.filter(member_id=member_id, status=StatusDemand.approved).aggregate(sum_pv=Sum('pv'))
    messages = 0  # Message.objects.filter(type_relation=2, demand__member__id=member_id, is_read=0, is_delete=0).count()
favorites = Favorites.objects.filter(type_relation=2, demand__member__id=member_id).count()
cursor = connection.cursor()
demands = Demand.objects.filter(member_id=member_id, status=StatusDemand.approved)
industry_ids = []
industry_ids_cv1 = []
if demands:
for d in demands:
for cv1 in d.demand_industries.all():
industry_ids_cv1.append(cv1.cv1)
for industry in d.company_industries.all():
industry_ids.append(industry.id)
recommend_companies = Company.objects.filter(industry__in=set(industry_ids), status=1).exclude(id=27).count()
#start_date = datetime.date(datetime.datetime.today().year, datetime.datetime.today().month - 3, datetime.datetime.today().day)
start_date = datetime.datetime.today()-datetime.timedelta(days=90)
recommend_industries = Deal.objects.filter(cv1__in=set(industry_ids_cv1), happen_date__gt=start_date).count()
count_demand_all = {}
count_demand_all["pvs"] = pvs["sum_pv"]
count_demand_all["messages"] = messages
count_demand_all["favorites"] = favorites
count_demand_all["recommend_companies"] = recommend_companies
count_demand_all["recommend_industries"] = recommend_industries
return count_demand_all
@csrf_exempt
@member_login_required
def json_index(request):
c = {}
member_id = request.session['member']['id']
if request.method == 'POST':
try:
condition = Q(status=StatusDemand.approved)
condition2 = Q()
industryIds = request.GET.get("industryId", False)
if industryIds and industryIds != "0":
ids = industryIds.split(",")
for id in ids:
condition2 = condition2 | Q(company_industries=id)
locationIds = request.GET.get("locationId", False)
if locationIds and locationIds != "0":
ids = locationIds.split(",")
for id in ids:
condition2 = condition2 | Q(company_countries=id)
condition = condition & condition2
if industryIds == False and locationIds == False:
result_list = preferenceByMemberId(c, member_id)
else:
result_list = Demand.objects.filter(condition).order_by("-id").distinct()[0 : 3]
c["result_list"] = result_list
list_demand_preference_plus = 3 - len(result_list)
if list_demand_preference_plus > 0:
c['recent_demand'] = Demand.objects.filter(status=StatusDemand.approved).order_by("-id")[0: list_demand_preference_plus]
except Exception, e:
# print e.message
logger.error('show demand json error!' + e.message)
c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
return render_to_response("purchase/"+request.lang+"/json_index.html", c, context_instance=RequestContext(request))
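# Flatten a Demand plus its counters into a plain dict for the list templates.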
def countResult(result):
resultList = {}
# total_recommends = RecommondItem.objects.filter(is_delete=0, project__id=result.id).count()
total_favorites = Favorites.objects.filter(type_relation=2, demand__id=result.id).count()
not_read_messages = Message.objects.filter(type_relation=2, demand__id=result.id, is_read=0).count()
resultList['total_recommends'] = RecommondProjectItem.objects.filter(demand=result, project__status=StatusProject.approved).count()
resultList['total_target'] = 0
resultList['total_favorites'] = total_favorites
resultList['not_read_messages'] = not_read_messages
resultList['id'] = result.id
resultList['name_cn'] = result.name_cn
resultList['name_en'] = result.name_en
resultList['status'] = result.status
resultList['statusName'] = result.get_status_display
resultList['processName'] = result.get_process_display
resultList['add_time'] = result.add_time
resultList['pvs'] = result.pv
resultList['integrity'] = result.integrity
return resultList
@member_login_required
def mylist(request, type):
c = {}
c.update(request)
c['member'] = request.session.get('member', None)
member_id = request.session['member']['id']
demands = Demand.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).order_by("-id")
c['total_all'] = demands.count()
result_list_2 = []
for result in demands:
result_list_2.append(countResult(result))
c['demands'] = result_list_2
c[type] = "active"
c['type'] = type
'''
demands_release = demands.filter(status=StatusDemand.approved)
demands_draft = demands.filter(status=StatusDemand.draft)
demands_pending = demands.filter(status=StatusDemand.pending)
demands_not_approved = demands.filter(status=StatusDemand.not_approved)
demands_offline = demands.filter(status=StatusDemand.offline)
demands_expired = demands.filter(expire_date__gt=datetime.datetime.today).exclude(status=StatusDemand.deleted)
d_list = {"release": demands_release, "draft": demands_draft, "pending": demands_pending, "not_approved": demands_not_approved, "expired": demands_expired}
d_list.update({"offline": demands_offline, "all": demands})
result_list = d_list.get(type, demands)
result_list_2 = []
for result in result_list:
result_list_2.append(countResult(result))
c['result_list'] = result_list_2
total_all = demands.count()
total_release = demands_release.count()
total_pending = demands_pending.count()
total_draft = demands_draft.count()
total_offline = demands_offline.count()
total_not_approved = demands_not_approved.count()
c['total_all'] = total_all
c['total_release'] = total_release
c['total_pending'] = total_pending
c['total_offline'] = total_offline
c['total_not_approved'] = total_not_approved
c['total_draft'] = total_draft
total_project = Project.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).count()
c['total_project'] = total_project
c[type] = "active"
c['type'] = type
c['demands'] = result_list_2
'''
return render_to_response("purchase/"+request.lang+"/mylist.html", c, context_instance=RequestContext(request))
'''
@member_login_required
def mylist(request, type, id=0):
c = {}
c.update(request)
c['title'] = _("My Purchases")
c['member'] = request.session.get('member', None)
member_id = request.session['member']['id']
demands = Demand.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).order_by("-update_time")
demands_public = demands.filter(target_members=None, target_companies=None, target_industries=None)
demands_private = demands.exclude(target_members=None, target_companies=None, target_industries=None)
demands_release = demands.filter(status=StatusDemand.approved)
demands_draft = demands.filter(status=StatusDemand.draft)
demands_pending = demands.filter(status=StatusDemand.pending)
demands_not_approved = demands.filter(status=StatusDemand.not_approved)
demands_offline = demands.filter(status=StatusDemand.offline)
demands_expired = demands.filter(expire_date__gt=datetime.datetime.today).exclude(status=StatusDemand.deleted)
d_list = {"release": demands_release, "draft": demands_draft, "pending": demands_pending, "not_approved": demands_not_approved, "expired": demands_expired}
d_list.update({"offline": demands_offline, "all": demands, "public": demands_public, "private": demands_private})
result_list = d_list.get(type, demands)
total = result_list.count()
c['total'] = total
total_all = demands.count()
total_public = demands_public.count()
total_private = demands_private.count()
total_draft = demands_draft.count()
c['total_all'] = total_all
c['total_public'] = total_public
c['total_private'] = total_private
c['total_draft'] = total_draft
total_project = Project.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).count()
c['total_project'] = total_project
if total == 0:
return render_to_response("purchase/"+request.lang+"/mylist_empty.html", c, context_instance=RequestContext(request))
ids = []
for m in result_list:
ids.append(m.id)
id_current = int(id)
if id_current == 0:
demand = result_list[0]
id_current = demand.id
else:
if id_current not in ids:
raise Http404
pageIndex = ids.index(id_current)+1
demand = result_list[pageIndex-1]
pageIndex = ids.index(id_current)+1
#c['result_list'] = result_list
pageTotal = total
c['pageTotal'] = pageTotal
page_start = 1
page_end = 10
if pageIndex >= 5:
page_start = pageIndex - 4
page_end = pageIndex + 5
if page_end > pageTotal:
page_end = pageTotal
pages = ids[page_start-1:page_end]
id_list_top = enumerate(pages, start=page_start)
id_list = enumerate(pages, start=page_start)
c['id_list_top'] = id_list_top
c['id_list'] = id_list
c['page_start'] = page_start
c['page_end'] = page_end
c[type] = "active"
c['type'] = type
c['d'] = demand
c['pageIndex'] = pageIndex
c['id_current'] = id_current
c['first_id'] = ids[0]
c['end_id'] = ids[total-1]
if pageIndex > 1:
c['pre_id'] = ids[pageIndex-1]
if pageIndex < pageTotal:
c['next_id'] = ids[pageIndex]
if page_end < pageTotal:
c['next_id_page_end'] = ids[page_end]
visitors = DemandVisitor.objects.filter(demand_id=demand.id).order_by("-add_time")
c['visitors'] = visitors
c['visitors_count'] = visitors.count()
followers = Favorites.objects.filter(demand_id=demand.id).order_by("-add_time")
c['followers'] = followers
message_list = Message.objects.filter(demand_id=demand.id).order_by("-add_time")
c['message_list'] = message_list
if len(demand.company_industries.all())>0:
        # TODO: replace with cv1 later
if demand.company_industries.all()[0].level==3:
c['deal_list_more_id']=demand.company_industries.all()[0].father.father.id
elif demand.company_industries.all()[0].level==2:
c['deal_list_more_id']=demand.company_industries.all()[0].father.id
else:
c['deal_list_more_id']=demand.company_industries.all()[0].id
c['deal_list'] =Deal.objects.filter(cv1=c['deal_list_more_id']).order_by('-update_time')[0:10]
c['compare_cn']= CompanyWithPE.objects.filter(country__name_en='China',industry__id=c['deal_list_more_id']).order_by('-ps')[0:5]
c['compare_usa']= CompanyWithPE.objects.filter(country__name_en='United States of America',industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
c['compare_hk']= CompanyWithPE.objects.filter(country__name_en='Hong Kong',industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
c['compare_uk']= CompanyWithPE.objects.filter(country__name_en='United Kingdom',industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
return render_to_response("purchase/"+request.lang+"/mylist.html", c, context_instance=RequestContext(request))
'''
@member_login_required
def mydetail(request, id):
c = {}
member = request.session.get('member', None)
c['member'] = member
member_id = request.session['member']['id']
demand = get_object_or_404(Demand, pk=id, member_id=member_id)
# c['d'] = demand
m = countResult(demand)
c['process'] = Demand.PROCESS
c['d'] = m
_visitors = DemandVisitor.objects.filter(demand_id=demand.id).order_by("-add_time")[0:8]
visitors=[]
c['recommendList'] = RecommondProjectItem.objects.filter(demand=demand, project__status=StatusProject.approved).order_by("project__update")[0:5]
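    # Hide internal @newchama.com accounts from the visitor list.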
for v in _visitors:
if v.member.email.find('@newchama.com')==-1:
visitors.append(v)
c['visitors'] = visitors
c['visitors_count'] = len(visitors)
followers = Favorites.objects.filter(demand_id=demand.id).order_by("-add_time")
c['followers'] = followers
message_list = Message.objects.filter(demand_id=demand.id).order_by("-add_time")
c['message_list'] = message_list
if len(demand.company_industries.all()) > 0:
        # TODO: replace with cv1 later
if demand.company_industries.all()[0].level == 3:
c['deal_list_more_id'] = demand.company_industries.all()[0].father.father.id
elif demand.company_industries.all()[0].level == 2:
c['deal_list_more_id'] = demand.company_industries.all()[0].father.id
else:
c['deal_list_more_id'] = demand.company_industries.all()[0].id
c['deal_list'] = Deal.objects.filter(cv1=c['deal_list_more_id']).order_by('-update_time')[0:10]
c['compare_cn'] = CompanyWithPE.objects.filter(country__name_en='China', industry__id=c['deal_list_more_id']).order_by('-ps')[0:5]
c['compare_usa'] = CompanyWithPE.objects.filter(country__name_en='United States of America', industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
c['compare_hk'] = CompanyWithPE.objects.filter(country__name_en='Hong Kong', industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
c['compare_uk'] = CompanyWithPE.objects.filter(country__name_en='United Kingdom', industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]
return render_to_response("purchase/"+request.lang+"/mydetail.html", c, context_instance=RequestContext(request))
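# AJAX "load more" endpoint for the purchase list; applies the same filters
# as new() but renders only the list fragment.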
def ajax_more(request):
c = {}
member = request.session.get('member', None)
if member is None:
return None
member_id = request.session['member']['id']
page = request.GET.get('page', 1)
pagesize = request.GET.get('pagesize', 10)
type = request.GET.get('type', 0)
keyword = request.GET.get('keywords', '')
country_id = request.GET.get('country_id', 0)
province_id = request.GET.get('province_id', 0)
industry_id = request.GET.get('industry_id', 0)
sort = request.GET.get('sort', 'time_desc')
condition = ConditionDemand()
condition.country_id = country_id
condition.keyword = keyword
condition.status = StatusDemand.approved
condition.province_id = province_id
condition.type = type
level = 1
if industry_id != "" and industry_id != "0" and industry_id != 0:
condition.industry = Industry.objects.get(pk=industry_id)
level = condition.industry.level
data, total = find_demands(condition, page, pagesize, sort)
c['demands'] = data
c["have_more_data"] = len(data) == int(pagesize)
c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)
return render_to_response("purchase/"+request.lang+"/ajax_list.html", c, context_instance=RequestContext(request))
@member_login_required
@csrf_protect
def add(request):
c = {}
c.update(csrf(request))
c['title'] = _("Add Purchase")
c['member'] = request.session.get('member', None)
u = Demand()
u.valid_day = 60
# if request.method == "POST":
# u = Demand()
# name_en = request.POST["name_en"]
# name_cn = request.POST["name_cn"]
# if name_en == "" and name_cn == "":
# isvalid = False
# messages.warning(request, _("please input demand name"))
# submitStatus = request.POST["submitStatus"]
# redirect_url = "purchase.mylist_pending"
# if submitStatus == "draft":
# u.status = StatusDemand.draft
# redirect_url = "purchase.mylist_draft"
# else:
# u.status = StatusDemand.pending
# _bind_data(request, u)
# if isvalid:
# try:
# u.financial_year = datetime.datetime.today().year
# u.save()
# _save_items(request, u)
# return redirect(redirect_url)
# except Exception, e:
# messages.warning(request, e.message)
# logging.error(e.message)
c['target_companies_count'] = 0
c["u"] = u
c['readSuitorRelate'] = False
_load_types(c)
return render_to_response("purchase/"+request.lang+"/add.html", c, context_instance=RequestContext(request))
@member_login_required
@csrf_protect
def edit(request, id):
c = {}
c.update(csrf(request))
c['title'] = _("Edit Purchase")
c['member'] = request.session.get('member', None)
member_id = c['member']['id']
isvalid = True
u = get_object_or_404(Demand, pk=id, member_id=member_id)
c['attachments'] = u.demand_attach.all()
c['u'] = u
c["other_target_companies"] = DemandOtherTargetCompany.objects.filter(demand__id=u.id)
countrySelected = u.company_countries.all()
if countrySelected:
c['company_country'] = countrySelected[0]
provinceSelected = u.company_provinces.all()
if provinceSelected:
c['company_province'] = provinceSelected[0]
industrySelected = u.demand_industries.all()
if industrySelected:
c['company_industry'] = industrySelected[0]
c['target_companies_count'] = u.target_companies.all().count()
c['readSuitorRelate'] = True
if request.lang == "en-us":
mks = u.demand_keyword_en.all()
else:
mks = u.demand_keyword.all()
c['mks'] = mks
keywords = ""
if len(mks) > 0:
for m in mks:
keywords += m.keyword + ","
keywords = keywords[0 : len(keywords) - 1]
c['keywords'] = keywords
_load_types(c)
member = get_object_or_404(Member,id=member_id)
write_demand_edit_log(request,member,u)
return render_to_response("purchase/"+request.lang+"/add.html", c, context_instance=RequestContext(request))
@member_login_required
def detail(request, id):
c = {}
c['title'] = _("Purchase Detail")
member = request.session.get('member', None)
c['member'] = member
member_id = request.session['member']['id']
if Helper.hasAgentRole(member['company_type']):
messages.warning(request, _("You have no permission to visit this page"))
return render_to_response("services/error_message.html", c, context_instance=RequestContext(request))
demand = get_object_or_404(Demand, pk=id)
if demand.status != StatusDemand.approved and demand.member_id != member_id:
raise Http404
if demand.is_suitor:
if demand.is_push_to_member(member) is False and demand.member_id != member_id:
messages.warning(request, _("not target"))
return render_to_response("services/error_message.html", c, context_instance=RequestContext(request))
# return HttpResponse(_("not target"))
c['d'] = demand
#c['last_year'] = demand.financial_year-1
#demands_other = Demand.objects.filter(member_id=demand.member_id, status=StatusDemand.approved, is_anonymous=False).exclude(id=id)[0:5]
#c['demands_other'] = demands_other
#demands_recommend = Demand.objects.filter(service_type=demand.service_type, status=StatusDemand.approved).exclude(id=id).order_by("-pv")[0:5]
#c['demands_recommend'] = demands_recommend
c['message_list'] = Message.objects.filter(type_relation=2, demand=demand, is_delete=0).order_by('-add_time')
if demand.member_id == member_id:
c['is_own'] = True
member = Member.objects.get(id=member_id)
member.view_demand(demand)
if request.lang == "en-us":
mks = demand.demand_keyword_en.all()
else:
mks = demand.demand_keyword.all()
keywords = ""
if len(mks) > 0:
for m in mks:
keywords += m.keyword + ","
keywords = keywords[0 : len(keywords) - 1]
c['keywords'] = keywords
c['is_added_favorite'] = member.is_added_demand_to_favorites(demand)
c['is_expired']=datetime.date.today() > demand.expire_date
url = "/detail.html"
type = request.GET.get("type", "")
c['type'] = type
if type == "1":
url = "/view.html"
else:
write_demand_view_log(request,member,demand, type)
return render_to_response("purchase/"+request.lang+ url, c, context_instance=RequestContext(request))
@member_login_required
def pdf(request, id):
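    # Python 2 workaround so xhtml2pdf can render the UTF-8 template output.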
reload(sys)
sys.setdefaultencoding('utf8')
c = {}
c['title'] = _("Purchase Detail")
c['member'] = request.session.get('member', None)
member_id = request.session['member']['id']
demand = get_object_or_404(Demand, pk=id)
member = Member.objects.get(pk=member_id)
if demand.status != StatusDemand.approved and demand.member_id != member_id:
raise Http404
if demand.is_suitor:
if demand.is_push_to_member(member) is False and demand.member_id != member_id:
return HttpResponse(_("not target"))
c['d'] = demand
c['last_year'] = demand.financial_year-1
c['static_root'] = settings.STATICFILES_DIRS[0]
template = get_template("purchase/"+request.lang+"/detail_pdf.html")
html = template.render(Context(c))
#print(html)
file = StringIO.StringIO()
#file = open(os.path.join(settings.MEDIA_ROOT, 'test.pdf'), "w+b")
pisaStatus = pisa.CreatePDF(html, dest=file)
# Return PDF document through a Django HTTP response
file.seek(0)
pdf = file.read()
file.close() # Don't forget to close the file handle
member.print_demand(demand)
write_demand_teaser_view_log(request,member,demand)
return HttpResponse(pdf, mimetype='application/pdf')
@member_login_required
def save(request):
response_data = {}
response_data['result'] = 'failed'
if request.method == "POST":
try:
name_en = request.POST["name_en"]
name_cn = request.POST["name_cn"]
if name_en == "" and name_cn == "":
response_data['message'] = _("please input demand name")
else:
#check stock_symbol is correct
company_stock_symbol = request.POST.get("company_stock_symbol", False)
is_list_company = int(request.POST.get("is_list_company", 0))
if company_stock_symbol and is_list_company == 1:
checksymbolExsit = ListedCompany.objects.filter(stock_symbol=company_stock_symbol)
if len(checksymbolExsit) == 0:
response_data['message'] = 'symbolNotExsit'
return HttpResponse(simplejson.dumps(response_data), content_type="text/plain")
submitStatus = request.POST["submitStatus"]
u = Demand()
isExsit = False
id_post = request.POST.get("id", False)
                # check whether this member already has a demand with the same name
condition = Q(member_id=request.session["member"]["id"])
condition2 = Q()
if id_post:
condition = condition & ~Q(pk=id_post)
if name_cn.strip() != "":
condition2 = condition2 | Q(name_cn=name_cn.strip())
if name_en.strip() != "":
condition2 = condition2 | Q(name_en=name_en.strip())
project = Demand.objects.filter(condition & condition2)
if project:
isExsit = True
response_data['message'] = "demandExsit"
if isExsit is False:
if id_post:
u = Demand.objects.get(pk=id_post)
                        if u.status != StatusDemand.approved:  # Terry's note: once approved, do not reset the status back to pending
if submitStatus == "draft":
u.status = StatusDemand.draft
else:
u.status = StatusDemand.pending
                    ok, msg = _bind_data(request, u)
                    if ok:
                        response_data['result'] = 'success'
                        response_data['id'] = u.id
                        response_data['message'] = 'Operation successful'
else:
response_data['message'] = msg
except Exception, e:
logger.error(e.message)
response_data['message'] = e.message
return HttpResponse(simplejson.dumps(response_data), content_type="text/plain")
def _load_types(c):
c["current_year"] = datetime.datetime.today().year
c["last_year"] = datetime.datetime.today().year-1
c["FINANCIAL_TYPES"] = Demand.FINANCIAL_TYPES
c["FINANCIAL_TYPES_2"] = Demand.FINANCIAL_TYPES_2
c["STOCK_STRUCTURE_PERCENTAGE_TYPES"] = Demand.STOCK_STRUCTURE_PERCENTAGE_TYPES
c["CURRENCY_TYPES"] = Demand.CURRENCY_TYPES
c["EMPLOYEES_COUNT_TYPES"] = Demand.EMPLOYEES_COUNT_TYPES
c["SERVICE_TYPES"] = Demand.SERVICE_TYPES
c["SERVICE_TYPES_2"] = Demand.SERVICE_TYPES_2
c["countries"] = Helper.find_countries()
c["industries"] = Helper.find_industries_level1()
c["members"] = Member.objects.all()
c["companies"] = Company.objects.all().exclude(id=27)
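# Populate a Demand from the POST payload, maintain a rough "integrity"
# completeness score, and persist attachments, locations, industries and
# keywords. Returns (ok, message_code).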
def _bind_data(request, u):
has_attach = False
upload_types = request.POST.getlist("upload_types", [])
for ut in upload_types:
uf = request.FILES.get("upload_file_" + ut, False)
if uf:
file_ext = os.path.splitext(uf.name)[1].lower()
if uf.size > 20000000:
return False, "tooBig"
#return _("The file cannot be more than 20M")
if file_ext != ".doc" and file_ext != ".docx" and file_ext != ".pdf" and file_ext != ".ppt" and file_ext != ".pptx":
return False, "typeError"
#return _("The file must be 'doc|docx|pdf'")
has_attach = True
integrity = 0
u.name_cn = request.POST.get("name_cn", None)
u.name_en = request.POST.get("name_en", None)
if request.POST.get("name_cn", False) or request.POST.get("name_en", False):
integrity = integrity + 1
if request.POST.get("service_type", False):
u.service_type = request.POST["service_type"]
integrity = integrity + 1
pay_currency = request.POST.get("pay_currency", False)
if pay_currency and pay_currency != "":
u.pay_currency = pay_currency.replace(",", "")
integrity = integrity + 1
u.is_list_company = int(request.POST.get("is_list_company", 0))
integrity = integrity + 1
project_relation = request.POST.get("project_relation", False)
if project_relation and project_relation != "":
u.project_relation = project_relation.replace(",", "")
integrity = integrity + 1
valid_day = int(request.POST.get("valid_day", 0))
u.valid_day = valid_day
u.expire_date = datetime.datetime.today() + datetime.timedelta(days=int(valid_day))
integrity = integrity + 1
u.is_anonymous = int(request.POST.get("is_anonymous", "0"))
integrity = integrity + 1
exist_upload_names = request.POST.getlist("exist_upload_names", [])
if has_attach or exist_upload_names:
integrity = integrity + 1
u.has_attach = True
else:
u.has_attach = False
#country
#industry
#project_keyword
u.employees_count_type = request.POST.get("employees_count_type", None)
if request.POST.get("employees_count_type", False):
integrity = integrity + 1
if request.POST.get("stock_structure_percentage_type_institutional", False):
u.stock_structure_percentage_type_institutional = request.POST["stock_structure_percentage_type_institutional"]
if request.POST.get("stock_structure_percentage_type_management", False):
u.stock_structure_percentage_type_management = request.POST["stock_structure_percentage_type_management"]
if request.POST.get("stock_structure_percentage_type_private", False):
u.stock_structure_percentage_type_private = request.POST["stock_structure_percentage_type_private"]
    if request.POST.get("stock_structure_percentage_type_institutional", False) or request.POST.get("stock_structure_percentage_type_management", False) or request.POST.get("stock_structure_percentage_type_private", False):
integrity = integrity + 1
u.currency_type_financial = request.POST.get("currency_type", None)
integrity = integrity + 1
expected_enterprice_value_enter = request.POST.get("expected_enterprice_value_enter", False)
if expected_enterprice_value_enter and expected_enterprice_value_enter != "":
u.expected_enterprice_value_enter = expected_enterprice_value_enter.replace(",", "")
integrity = integrity + 1
#new column
stock_percent = request.POST.get("stock_percent", False)
if stock_percent and stock_percent != "":
u.stock_percent = stock_percent
integrity = integrity + 1
deal_size_enter = request.POST.get("deal_size_enter", False)
if deal_size_enter and deal_size_enter != "":
u.deal_size_enter = deal_size_enter.replace(",", "")
integrity = integrity + 1
income_last_phase_enter = request.POST.get("income_last_phase_enter", False)
if income_last_phase_enter and income_last_phase_enter != "":
u.income_last_phase_enter = income_last_phase_enter.replace(",", "")
integrity = integrity + 1
profit_last_phase_enter = request.POST.get("profit_last_phase_enter", False)
if profit_last_phase_enter and profit_last_phase_enter != "":
u.profit_last_phase_enter = profit_last_phase_enter.replace(",", "")
integrity = integrity + 1
ebitda = request.POST.get("ebitda", False)
if ebitda and ebitda != "":
u.ebitda = ebitda.replace(",", "")
integrity = integrity + 1
u.audit_status = int(request.POST.get("audit_status", 0))
integrity = integrity + 1
u.process = request.POST.get("process", 0)
'''
no input start
'''
member_id = request.session["member"]["id"]
if member_id != "0" and member_id != "":
u.member = Member.objects.get(pk=member_id)
u.business_cn = request.POST.get("business_cn", None)
u.business_en = request.POST.get("business_en", None)
# if request.POST.get("business_cn", False) or request.POST.get("business_en", False):
# integrity = integrity + 1
u.company_stock_symbol = request.POST.get("company_stock_symbol", None)
#u.company_symbol = request.POST["company_symbol"]
financial_is_must_audit = int(request.POST.get("financial_is_must_audit", 0))
u.financial_is_must_audit = financial_is_must_audit
if financial_is_must_audit == 1:
u.financial_audit_company_is_must_default = True
elif financial_is_must_audit == 2:
u.financial_audit_company_is_must_default = False
# if request.POST["growth_three_year"] != "":
# u.growth_three_year = request.POST["growth_three_year"]
# integrity = integrity + 1
deal_size = request.POST.get("deal_size", False)
if deal_size and deal_size != "":
u.deal_size = deal_size
# integrity = integrity + 1
if request.POST.get("income", False):
u.income = request.POST["income"]
if request.POST.get("income_last_phase", False):
u.income_last_phase = request.POST["income_last_phase"]
# integrity = integrity + 1
u.intro_cn = request.POST.get("intro_cn", None)
u.intro_en = request.POST.get("intro_en", None)
# if request.POST.get("intro_cn", False) or request.POST.get("intro_en", False):
# integrity = integrity + 1
u.is_suitor = int(request.POST.get("is_suitor", "0"))
# u.net_assets = request.POST["net_assets"]
if request.POST.get("profit", False):
u.profit = request.POST["profit"]
if request.POST.get("profit_last_phase", False):
u.profit_last_phase = request.POST["profit_last_phase"]
if request.POST.get("registered_capital", False):
u.registered_capital = request.POST["registered_capital"]
total_assets_last_phase = request.POST.get("total_assets_last_phase", False)
if total_assets_last_phase and total_assets_last_phase != "":
u.total_assets_last_phase = total_assets_last_phase.replace(",", "")
# u.remark_cn = request.POST["remark_cn"]
# u.remark_en = request.POST["remark_en"]
# u.financial_audit_company_name = request.POST["financial_audit_company_name"]
if request.POST.get("expected_enterprice_value", False):
u.expected_enterprice_value = request.POST["expected_enterprice_value"]
# integrity = integrity + 1
name_project_cn = request.POST.get("name_project_cn", False)
if name_project_cn:
u.name_project_cn = name_project_cn
else:
u.name_project_cn = ""
name_project_en = request.POST.get("name_project_en", False)
if name_project_en:
u.name_project_en = name_project_en
else:
u.name_project_en = ""
# if request.POST.get("name_project_cn", False) or request.POST.get("name_project_en", False):
# integrity = integrity + 1
project_stage = request.POST.get("project_stage", False)
if project_stage and project_stage != "":
u.project_stage = project_stage.replace(",", "")
pay_way = request.POST.get("pay_way", False)
if pay_way and pay_way != "":
u.pay_way = pay_way.replace(",", "")
income_enter = request.POST.get("income_enter", False)
if income_enter and income_enter != "":
u.income_enter = income_enter.replace(",", "")
profit_enter = request.POST.get("profit_enter", False)
if profit_enter and profit_enter != "":
u.profit_enter = profit_enter.replace(",", "")
# if request.POST.get("income", False) or request.POST.get("income_enter", False):
# integrity = integrity + 1
#
# if request.POST.get("profit", False) or request.POST.get("profit_enter", False):
# integrity = integrity + 1
total_assets = request.POST.get("total_assets", False)
if total_assets and total_assets != "":
u.total_assets = total_assets.replace(",", "")
# integrity = integrity + 1
total_profit = request.POST.get("total_profit", False)
if total_profit and total_profit != "":
u.total_profit = total_profit.replace(",", "")
# integrity = integrity + 1
'''
no input end
'''
#new column end
u.save()
u.demand_attach.all().delete()
exist_upload_names = request.POST.getlist("exist_upload_names", [])
exist_upload_newNames = request.POST.getlist("exist_upload_newNames", [])
upload_type_names = request.POST.getlist("upload_type_names", [])
upload_types = request.POST.getlist("upload_types", [])
for ut, tn in zip(upload_types, upload_type_names):
uf = request.FILES.get("upload_file_" + ut, False)
if uf:
# if uf.size > 2000000:
# messages.error(request, _("The file cannot be more than 2M"))
# return
da = DemandAttach()
da.demand = u
da.file_name = uf.name
da.file_type = ut
da.file_type_name = tn
da.new_name = _upload_project_file(uf)
da.save()
else:
            for t, f, n in zip(upload_types, exist_upload_names, exist_upload_newNames):  # no new upload for this type: keep the previously uploaded file
if t == ut:
da = DemandAttach()
da.demand = u
da.file_name = f
da.file_type = ut
da.file_type_name = tn
da.new_name = n
da.save()
break
countries_ids = request.POST.getlist("country", [])
if countries_ids is not None:
integrity = integrity + 1
for id in countries_ids:
if id != "0" and id != "":
u.company_countries = countries_ids
provinces_ids = request.POST.getlist("province", [])
if provinces_ids is not None:
for id in provinces_ids:
if id != "0" and id != "":
c = Province.objects.get(pk=id)
u.company_provinces.add(c)
targetCompanies = request.POST.getlist("target_companies", [])
if targetCompanies:
u.target_companies = request.POST.getlist("target_companies", [])
# industries_ids = request.POST.getlist("industry", [])
industries_ids = request.POST.getlist("industry_id", [])
u.company_industries.clear()
    DemandIndustry.objects.filter(demand_id=u.id).delete()
if industries_ids is not None:
integrity = integrity + 1
for id in industries_ids:
if id != "0" and id != "":
c = Industry.objects.get(pk=id)
u.company_industries.add(c)
di = DemandIndustry()
di.demand = u
if c.level == 3:
di.cv3 = c.id
di.cv2 = c.father_id
di.cv1 = c.father.father_id
elif c.level == 2:
di.cv2 = c.id
di.cv1 = c.father_id
else:
di.cv1 = c.id
di.save()
demand_keyword = request.POST.get("project_keyword", False)
if request.lang == "en-us":
u.demand_keyword_en.all().delete()
else:
u.demand_keyword.all().delete()
if demand_keyword:
integrity = integrity + 1
mks = demand_keyword.split(",")
for m in mks:
if request.lang == "en-us":
k = DemandKeywordEn()
else:
k = DemandKeyword()
k.keyword = m
k.demand = u
k.save()
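    # integrity is stored as a percentage of the 21 tracked fields.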
integrity = int(integrity * 100 / 21)
if request.lang == "zh-cn":
u.integrity = integrity
else:
u.integrity_en = integrity
u.save()
return True, "ok"
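# Detach every many-to-many relation from a demand.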
def _clear_items(u):
u.company_countries.clear()
u.company_industries.clear()
u.company_provinces.clear()
u.target_members.clear()
u.target_companies.clear()
u.target_industries.clear()
@csrf_exempt
def delete(request):
msg = ""
if request.method == 'POST':
id = request.POST["id"]
member = request.session.get('member', None)
member_id = request.session['member']['id']
if member is None:
msg = "nologon"
else:
try:
member=get_object_or_404(Member,id=member_id)
d=Demand.objects.get(pk=id, member_id=member_id)
d.status=StatusDemand.deleted
d.save()
# terry 20150204 remark
write_demand_delete_log(request, member, d)
msg = "success"
except Exception, e:
msg = e.message
return HttpResponse(msg)
@csrf_exempt
def offline(request):
msg = ""
if request.method == 'POST':
id = request.POST["id"]
member = request.session.get('member', None)
member_id = request.session['member']['id']
if member is None:
msg = "nologon"
else:
try:
member=get_object_or_404(Member,id=member_id)
d=Demand.objects.get(pk=id, member_id=member_id)
d.status=StatusDemand.offline
d.save()
write_demand_offline_log(request, member, d)
msg = "success"
except Exception, e:
msg = e.message
return HttpResponse(msg)
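# Feed of the latest approved demands for the home page, optionally narrowed
# to one industry or one location (country/province/city).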
@csrf_exempt
@member_login_required
def get_list_for_home(request):
c = {}
if request.method == 'GET':
try:
type = request.GET.get("type", "")
id = request.GET.get("id", "")
q1 = Q(status=StatusDemand.approved)
if type == "industry":
q2 = Q(company_industries=None) | Q(company_industries=id)
else:
q2 = Q(company_countries=None, company_provinces=None, company_cities=None)
location = request.GET.get("location", "")
if location == "city":
q2 = q2 | Q(company_cities=id)
elif location == "province":
q2 = q2 | Q(company_provinces=id)
else:
q2 = q2 | Q(company_countries=id)
if len(q2) > 0:
q1 = q1 & q2
demands = Demand.objects.filter(q1).order_by("-id")[0:10]
c['data'] = demands
except Exception, e:
logger.error(e.message)
return render_to_response("purchase/"+request.lang+"/list_for_home.html", c, context_instance=RequestContext(request))
@member_login_required
def download_attach(request, id):
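    # Same Python 2 UTF-8 workaround as in pdf() above.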
reload(sys)
sys.setdefaultencoding('utf8')
demand = get_object_or_404(Demand, pk=id)
member_id = request.session['member']['id']
member = Member.objects.get(pk=member_id)
if demand.status != StatusDemand.approved and demand.member_id != member_id:
raise Http404
if demand.demand_attach.count() == 0:
return HttpResponse(_("no attach"))
if demand.is_suitor:
if demand.is_push_to_member(member) is False and demand.member_id != member_id:
return HttpResponse(_("not target"))
path = settings.MEDIA_ROOT + "/demand/"
#please view: http://stackoverflow.com/questions/12881294/django-create-a-zip-of-multiple-files-and-make-it-downloadable
# Files (local path) to put in the .zip
# FIXME: Change this (get paths from DB etc)
#filenames = ["/tmp/file1.txt", "/tmp/file2.txt"]
filenames = []
for attach in demand.demand_attach.all():
filenames.append(path+attach.new_name+"/"+attach.file_name)
# Folder name in ZIP archive which contains the above files
# E.g [thearchive.zip]/somefiles/file2.txt
# FIXME: Set this to something better
#zip_subdir = "somefiles"
zip_subdir = demand.name_cn
if request.lang == "en-us":
zip_subdir = demand.name_en
zip_filename = "%s.zip" % zip_subdir
# Open StringIO to grab in-memory ZIP contents
s = StringIO.StringIO()
# The zip compressor
zf = zipfile.ZipFile(s, "w")
for fpath in filenames:
# Calculate path for file in zip
#fdir, fname = os.path.split(fpath)
fnewname, fname = os.path.split(fpath)
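        # fpath was built as "<stored file on disk>/<original name>", so the
        # split yields the real file path (fnewname) and the display name (fname).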
if os.path.isfile(fnewname) is False:
break
#zip_path = os.path.join(zip_subdir, fname)
zip_path = os.path.join(zip_subdir, fname)
# Add file, at correct path
#zf.write(fpath, zip_path)
zf.write(fnewname, zip_path)
# Must close zip for all contents to be written
zf.close()
# Grab ZIP file from in-memory, make response with correct MIME-type
resp = HttpResponse(s.getvalue(), content_type="application/x-zip-compressed")
# ..and correct content-disposition
resp['Content-Disposition'] = 'attachment; filename=%s' % zip_filename.encode("utf8")
member.download_demand_attach(demand)
return resp
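# Store an uploaded file under a random 8-digit name in MEDIA_ROOT/demand/
# and return that generated name.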
def _upload_project_file(f):
file_name = ""
path = settings.MEDIA_ROOT + "/demand/"
try:
if not os.path.exists(path):
os.makedirs(path)
file_ext = os.path.splitext(f.name)[1]
random_no = str(random.randint(0, 99999999)).zfill(8)
# print random_no
file_name = random_no + file_ext
destination = open(path + file_name, 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
except Exception, e:
logger.error(e.message)
# print e
return file_name
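# Shared query builder for demand lists: apply the ConditionDemand filters,
# sort, and slice out one page; returns (page_records, total_count).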
def find_demands(condition, page, pagesize, sort):
demands = Demand.objects.all()
if condition.keyword != "":
demands = demands.filter(Q(name_cn__contains=condition.keyword) | Q(name_en__contains=condition.keyword))
if condition.type != "" and condition.type != "0" and condition.type != 0:
demands = demands.filter(service_type=condition.type)
if condition.country_id != 0 and condition.country_id != "" and condition.country_id != "0":
demands = demands.filter(Q(company_countries__id=condition.country_id)) #Q(company_countries=None) |
if condition.province_id != 0 and condition.province_id != "" and condition.province_id != "0":
demands = demands.filter(Q(company_provinces__id=condition.province_id)) #Q(company_provinces=None) |
if condition.industry is not None:
demands = demands.filter(Q(company_industries=condition.industry) | Q(company_industries__father=condition.industry) | Q(company_industries__father__father=condition.industry)) #Q(company_industries=None) |
if condition.member_id != 0 and condition.member_id != "":
demands = demands.filter(member_id=condition.member_id)
if condition.status != -1 and condition.status != "":
demands = demands.filter(status=condition.status)
if page <= 1:
page = 1
if pagesize <= 1:
pagesize = 1
start_record = (int(page)-1) * int(pagesize)
end_record = int(start_record) + int(pagesize)
if sort == "":
sort = "time_desc"
if sort == "time_desc":
demands = demands.order_by("-id")
elif sort == "time_asc":
demands = demands.order_by("id")
elif sort == "size_desc":
demands = demands.order_by("-deal_size")
elif sort == "size_asc":
demands = demands.order_by("deal_size")
elif sort == "hot_desc":
demands = demands.order_by("-pv")
total = demands.count()
data = demands[start_record:end_record]
return data, total
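# Illustrative call of find_demands (names from this module). The function
# reads every ConditionDemand field, so all of them must be set first, as the
# views above do:
#   condition = ConditionDemand()
#   condition.status = StatusDemand.approved
#   condition.keyword = "energy"
#   condition.type = 0; condition.country_id = 0; condition.province_id = 0
#   condition.industry = None; condition.member_id = 0
#   data, total = find_demands(condition, page=1, pagesize=10, sort="size_desc")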
|
normal
|
{
"blob_id": "fb16009985ee7fe4a467a94160f593723b5aaf03",
"index": 7964,
"step-1": "# -*- coding: utf-8 -*- \nfrom django.http import Http404\nfrom django.shortcuts import render,render_to_response, get_object_or_404, redirect, HttpResponse\nfrom django.core.context_processors import csrf\nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\nfrom django.template import RequestContext,Context\nfrom django.template.loader import get_template\nimport os, sys\nfrom newchama import settings\nfrom newchama.helper import member_login_required\nfrom xhtml2pdf import pisa\nimport StringIO\nimport datetime\nfrom services.models import Deal,CompanyWithPE,Demand, Country, Industry, Member, Company, StatusDemand, Province, City, DemandAttach, DemandIndustry, InvestmentCompany, DemandOtherTargetCompany, ListedCompany\nfrom services.models import DemandViewLog, Message, DemandViewLog, Favorites, TypeFavorite, DemandVisitor, Project,News, StatusProject,Preference, PreferenceIndustry, PreferenceLocation, PreferenceKeyword\nfrom services.models import ConditionDemand, DemandKeyword, DemandKeywordEn\nfrom services.helper import Helper\nfrom recommond.views import update_project_recommond_list\nfrom recommond.models import RecommondProjectItem\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib import messages\nimport logging\nimport random\nimport zipfile\nfrom django.db.models import Q, Sum, connection\nfrom sets import Set as set\nfrom django.utils import simplejson\nfrom log.views import *\n\nlogger = logging.getLogger(__name__)\n\n\n@member_login_required\ndef addsuccess(request):\n c = {}\n c['title'] = _(\"Buyer Recommended\")\n c['member'] = request.session.get('member', None)\n\n return render_to_response(\"purchase/\"+request.lang+\"/add_success.html\", c, context_instance=RequestContext(request))\n\n\ndef _is_has_condition(condition):\n return condition != \"\" and condition != \"0\"\n\n\n@member_login_required\ndef search(request):\n c = {}\n c.update(request)\n c['title'] = _(\"Purchase Search\")\n c['member'] = request.session.get('member', None)\n member_id = request.session['member']['id']\n is_search = request.GET.get(\"is_search\", \"\")\n\n if is_search == '1':\n is_has_condition = False\n condition = ConditionDemand()\n condition.status = StatusDemand.approved\n demands = Demand.objects.filter(status=StatusDemand.approved)\n keyword = request.GET.get(\"keyword\", \"\")\n country = request.GET.get(\"country\", \"\")\n #province = request.GET.get(\"province\", \"\")\n #city = request.GET.get(\"city\", \"\")\n type = request.GET.get(\"type\", \"\")\n industry_first = request.GET.get(\"industry_first\", \"\")\n industry_second = request.GET.get(\"industry_second\", \"\")\n industry_third = request.GET.get(\"industry_third\", \"\")\n if keyword != \"\":\n c[\"keyword\"] = keyword\n is_has_condition = True\n if type != \"\":\n c[\"type\"] = int(type)\n is_has_condition = True\n country_id = 0\n #province_id = 0\n #city_id = 0\n if _is_has_condition(country):\n country_id = int(country)\n c[\"country\"] = country_id\n is_has_condition = True\n industry = \"\"\n if _is_has_condition(industry_first):\n industry = industry_first\n c[\"industry_first\"] = int(industry_first)\n if _is_has_condition(industry_second):\n industry = industry_second\n c[\"industry_second\"] = int(industry_second)\n if _is_has_condition(industry_third):\n industry = industry_third\n c[\"industry_third\"] = int(industry_third)\n if industry != \"\":\n industry_condition = Industry.objects.get(pk=industry)\n condition.industry = industry_condition\n 
is_has_condition = True\n condition.country_id = country_id\n condition.keyword = keyword\n condition.type = type\n sort = \"time_desc\"\n if is_has_condition:\n data, total = find_demands(condition, 1, 5, sort)\n c['has_more'] = total > 5\n c['demands'] = data\n c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)\n c['is_search'] = True\n c['is_has_condition'] = is_has_condition\n c[\"SERVICE_TYPES\"] = Demand.SERVICE_TYPES\n c[\"countries\"] = Helper.find_countries()\n c[\"industries\"] = Helper.find_industries_level1()\n return render_to_response(\"purchase/\"+request.lang+\"/search.html\", c, context_instance=RequestContext(request))\n\n\n@member_login_required\ndef search_keyword(request):\n c = {}\n c['title'] = _(\"Search\")\n keyword = request.GET.get(\"keyword\", '')\n c['member'] = request.session.get('member', None)\n member_id = request.session['member']['id']\n member = get_object_or_404(Member,pk=member_id)\n c[\"keyword\"] = keyword\n demands = Demand.objects.filter(Q(status=StatusDemand.approved) & (Q(name_cn__contains=keyword) | Q(name_en__contains=keyword)))\n c['demands'] = demands[0:5]\n c['total_project'] = Project.objects.filter(Q(status=StatusProject.approved) & (Q(name_cn__contains=keyword) | Q(name_en__contains=keyword))).count()\n c['total_demand'] = demands.count()\n c['total_news'] = News.objects.filter(Q(title__contains=keyword) | Q(tag__contains=keyword)).count()\n c['total_company'] = Company.objects.filter(Q(short_name_cn__contains=keyword) | Q(short_name_en__contains=keyword)).exclude(id=27).count()\n c['total_member'] = Member.objects.filter(Q(last_name__contains=keyword) | Q(first_name__contains=keyword)).count()\n c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)\n\n write_search_demand_log(request,member,keyword)\n return render_to_response(\"purchase/\"+request.lang+\"/search_keyword.html\", c, context_instance=RequestContext(request))\n\n\n@member_login_required\ndef new(request):\n c = {}\n c['title'] = _(\"New Purchase\")\n c['member'] = request.session.get('member', None)\n # member_id = request.session['member']['id']\n # demands = Demand.objects.filter(status=StatusDemand.approved)\n # sort = request.GET.get(\"sort\", \"time_desc\")\n # c['sort'] = sort\n # c[sort] = \"active\"\n # if sort == \"time_desc\":\n # demands = demands.order_by(\"-id\")\n # elif sort == \"time_asc\":\n # demands = demands.order_by(\"id\")\n # check the preference is setting\n # pi = PreferenceIndustry.objects.filter(preference__member__id=member_id, preference__title=\"demand\")\n # pn = PreferenceKeyword.objects.filter(preference__member__id=member_id, preference__title=\"demand\")\n # pl = PreferenceLocation.objects.filter(preference__member__id=member_id, preference__title=\"demand\")\n # if len(pi) == 0 and len(pl) == 0 and len(pn) == 0:\n # c['need_preference'] = True\n # c['demands'] = demands[0:50]\n # else:\n # c['need_preference'] = False\n # c['demands'] = demands[0:10]\n # check finish\n member_id = request.session['member']['id']\n type = request.GET.get('type', 0)\n keyword = request.GET.get('keywords', '')\n country_id = request.GET.get('country_id', 0)\n province_id = request.GET.get('province_id', 0)\n industry_id = request.GET.get('industry_id', 0)\n sort = request.GET.get('sort', 'time_desc')\n condition = ConditionDemand()\n condition.country_id = country_id\n condition.keyword = keyword\n condition.status = StatusDemand.approved\n condition.province_id = province_id\n condition.type = type\n level 
= 1\n if industry_id != \"\" and industry_id != \"0\" and industry_id != 0:\n condition.industry = Industry.objects.get(pk=industry_id)\n level = condition.industry.level\n pagesize = 10\n data, total = find_demands(condition, 1, pagesize, sort)\n\n c[\"have_more_data\"] = len(data) == int(pagesize)\n c['demands'] = data\n c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)\n\n c['countries'] = Helper.find_countries()\n c['industries'] = Helper.find_industries_level1()\n c['total'] = total\n return render_to_response(\"purchase/\"+request.lang+\"/new.html\", c, context_instance=RequestContext(request))\n\n\n@member_login_required\ndef banking_genius(request, id_post):\n c = {}\n c['title'] = _(\"New Purchase\")\n c['member'] = request.session.get('member', None)\n try:\n page = request.GET.get('page', 0)\n pagesize = request.GET.get('pagesize', 10)\n condition = recommendCondiction(request, id_post)\n c[\"totalRecommend\"] = RecommondProjectItem.objects.filter(condition).count()\n c[\"recommendList\"] = list(RecommondProjectItem.objects.filter(condition).order_by('-id'))[page : pagesize]\n except Exception, e:\n logger.error(e.message)\n c[\"id\"] = id_post\n c[\"countries\"] = Helper.find_countries()\n c[\"industries\"] = Helper.find_industries_level1()\n c[\"project_title\"] = Demand.objects.get(pk=id_post).name_cn\n member_id = request.session['member']['id']\n c['favorites_project_ids'] = Helper.find_member_favorite_project_ids(member_id)\n return render_to_response(\"purchase/\"+request.lang+\"/banking_genius.html\", c, context_instance=RequestContext(request))\n\n\n@member_login_required\ndef json_recommend(request):\n c = {}\n id_post = request.POST.get(\"id\", False)\n if request.method == \"POST\":\n try:\n page = request.POST.get('page', 1)\n pagesize = request.POST.get('pagesize', 10)\n if page <= 1:\n page = 1\n if pagesize <= 1:\n pagesize = 1\n start_record = (int(page)-1) * int(pagesize)\n end_record = int(start_record) + int(pagesize)\n\n if id_post:\n condition = recommendCondiction(request, id_post)\n c[\"recommendList\"] = list(RecommondProjectItem.objects.filter(condition).order_by('-id'))[start_record : end_record]\n member_id = request.session['member']['id']\n c['favorites_project_ids'] = Helper.find_member_favorite_project_ids(member_id)\n except Exception, e:\n logger.error(e. message)\n return render_to_response(\"purchase/\"+request.lang+\"/json_recommend.html\", c, context_instance=RequestContext(request))\n\n\n@member_login_required\ndef json_recommend_count(request):\n count = 0\n id_post = request.POST.get(\"id\", False)\n if request.method == \"POST\":\n try:\n if id_post:\n condition = recommendCondiction(request, id_post)\n count = RecommondProjectItem.objects.filter(condition).count()\n print count\n except Exception, e:\n logger.error(e. 
message)\n return HttpResponse(count)\n\n\n@member_login_required\ndef sync_recommond(request):\n result = \"success\"\n try:\n if request.method == \"POST\":\n id_post = request.POST.get(\"id\", False)\n if id_post:\n d = Demand.objects.get(pk=id_post)\n project_list=Project.objects.filter(status=2).filter(expire_date__gt=datetime.datetime.now()).order_by('-id')\n update_project_recommond_list(d, project_list)\n result = \"success\"\n except Exception, e:\n print e.message\n logger.error(e.message)\n return HttpResponse(result)\n\n\ndef recommendCondiction(request, id):\n condition = Q(demand_id=id, is_delete=False)\n condition2 = Q()\n\n target_location_id = request.POST.get('target_location_id', 0)\n target_industry_id = request.POST.get('target_industry_id', 0)\n target_location_type = request.POST.get('target_location_type', 0)\n if target_location_id != \"0\" and target_location_id != 0:\n if target_location_type == \"province\":\n condition2 = condition2 | Q (company_province=target_location_id)\n else:\n condition2 = condition2 | Q (company_country=target_location_id)\n if target_industry_id != \"0\" and target_industry_id != 0:\n condition2 = condition2 | Q (company_industry=target_industry_id)\n\n if target_location_id != \"0\" or target_industry_id != \"0\":\n p = Project.objects.filter(condition2)\n condition = condition & Q (project=p)\n\n return condition\n\n\ndef preferenceByMemberId(c, member_id):\n list = []\n preferences = Preference.objects.filter(member_id=member_id, title=\"demand\")[0: 1]\n condition = Q(status=StatusDemand.approved)\n if len(preferences) > 0:\n condition2 = Q()\n p = preferences[0]\n c['preference_demand_id'] = p.id\n preference_project_industries = p.preference_industry.all() #PreferenceIndustry.objects.filter(preference__member__id=member['id'])\n\n c['pre_demand_indusrtis'] = preference_project_industries\n if len(preference_project_industries) > 0:\n for ppi in preference_project_industries:\n condition2 = condition2 | Q (company_industries=ppi.industry_id)\n preference_project_location = p.preference_location.all()\n\n c['pre_demand_locations'] = preference_project_location\n if len(preference_project_location):\n for ppl in preference_project_location:\n condition2 = condition2 | Q (company_countries=ppl.country_id)\n\n condition = condition & condition2\n list = Demand.objects.filter(condition).order_by(\"-id\").distinct()[0: 3]\n return list\n\n\ndef demandByMemberId(member_Id):\n demands = Demand.objects.filter(member_id=member_Id, status=StatusDemand.approved).order_by(\"-id\")[0: 5]\n list_demand = []\n for demand in demands:\n count_message = Message.objects.filter(type_relation=2, demand=demand.id, is_read=0).count()\n count_favor = Favorites.objects.filter(type_relation=1, demand=demand.id).count()\n company_industries = demand.company_industries.all()\n count_company = 0\n count_industry = 0\n if company_industries:\n industry_ids = []\n industry_level_1_id = []\n for c in company_industries:\n industry_ids.append(c.id)\n industry_level = c.level\n industry_id = c.id\n if industry_level == 2:\n industry_id = c.father_id\n elif industry_level == 3:\n industry_id = c.father.father_id\n industry_level_1_id.append(industry_id)\n count_company = Company.objects.filter(industry__in=industry_ids, status=1).exclude(id=27).count()\n #start_date = datetime.date(datetime.datetime.today().year, datetime.datetime.today().month - 3, datetime.datetime.today().day)\n start_date = datetime.datetime.today()-datetime.timedelta(days=90)\n count_industry = 
Deal.objects.filter(cv1__in=industry_level_1_id, happen_date__gt=start_date).count()\n pro = {}\n pro[\"demand\"] = demand\n pro[\"count_message\"] = count_message\n pro[\"count_favor\"] = count_favor\n pro[\"count_industry\"] = count_industry\n pro[\"count_company\"] = count_company\n list_demand.append(pro)\n return list_demand\n\n\ndef countDemandStuffTotal(member_id):\n pvs = Demand.objects.filter(member_id=member_id, status=StatusDemand.approved).aggregate(sum_pv=Sum('pv'))\n messages = 0#Message.objects.filter(type_relation=2, demand__member__id=member_id, is_read=0, is_delete=0).count()\n favorites = Favorites.objects.filter(type_relation=2, demand__member__id=member_id).count()\n cursor = connection.cursor()\n\n\n demands = Demand.objects.filter(member_id=member_id, status=StatusDemand.approved)\n industry_ids = []\n industry_ids_cv1 = []\n if demands:\n for d in demands:\n for cv1 in d.demand_industries.all():\n industry_ids_cv1.append(cv1.cv1)\n for industry in d.company_industries.all():\n industry_ids.append(industry.id)\n recommend_companies = Company.objects.filter(industry__in=set(industry_ids), status=1).exclude(id=27).count()\n\n #start_date = datetime.date(datetime.datetime.today().year, datetime.datetime.today().month - 3, datetime.datetime.today().day)\n start_date = datetime.datetime.today()-datetime.timedelta(days=90)\n recommend_industries = Deal.objects.filter(cv1__in=set(industry_ids_cv1), happen_date__gt=start_date).count()\n count_demand_all = {}\n count_demand_all[\"pvs\"] = pvs[\"sum_pv\"]\n count_demand_all[\"messages\"] = messages\n count_demand_all[\"favorites\"] = favorites\n count_demand_all[\"recommend_companies\"] = recommend_companies\n count_demand_all[\"recommend_industries\"] = recommend_industries\n return count_demand_all\n\n\n@csrf_exempt\n@member_login_required\ndef json_index(request):\n c = {}\n member_id = request.session['member']['id']\n if request.method == 'POST':\n try:\n condition = Q(status=StatusDemand.approved)\n condition2 = Q()\n industryIds = request.GET.get(\"industryId\", False)\n if industryIds and industryIds != \"0\":\n ids = industryIds.split(\",\")\n for id in ids:\n condition2 = condition2 | Q(company_industries=id)\n\n locationIds = request.GET.get(\"locationId\", False)\n if locationIds and locationIds != \"0\":\n ids = locationIds.split(\",\")\n for id in ids:\n condition2 = condition2 | Q(company_countries=id)\n condition = condition & condition2\n\n if industryIds == False and locationIds == False:\n result_list = preferenceByMemberId(c, member_id)\n else:\n result_list = Demand.objects.filter(condition).order_by(\"-id\").distinct()[0 : 3]\n\n c[\"result_list\"] = result_list\n list_demand_preference_plus = 3 - len(result_list)\n if list_demand_preference_plus > 0:\n c['recent_demand'] = Demand.objects.filter(status=StatusDemand.approved).order_by(\"-id\")[0: list_demand_preference_plus]\n except Exception, e:\n # print e.message\n logger.error('show demand json error!' 
+ e.message)\n c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)\n return render_to_response(\"purchase/\"+request.lang+\"/json_index.html\", c, context_instance=RequestContext(request))\n\n\ndef countResult(result):\n resultList = {}\n # total_recommends = RecommondItem.objects.filter(is_delete=0, project__id=result.id).count()\n total_favorites = Favorites.objects.filter(type_relation=2, demand__id=result.id).count()\n not_read_messages = Message.objects.filter(type_relation=2, demand__id=result.id, is_read=0).count()\n resultList['total_recommends'] = RecommondProjectItem.objects.filter(demand=result, project__status=StatusProject.approved).count()\n resultList['total_target'] = 0\n resultList['total_favorites'] = total_favorites\n resultList['not_read_messages'] = not_read_messages\n resultList['id'] = result.id\n resultList['name_cn'] = result.name_cn\n resultList['name_en'] = result.name_en\n resultList['status'] = result.status\n resultList['statusName'] = result.get_status_display\n resultList['processName'] = result.get_process_display\n resultList['add_time'] = result.add_time\n resultList['pvs'] = result.pv\n resultList['integrity'] = result.integrity\n return resultList\n\n\n@member_login_required\ndef mylist(request, type):\n c = {}\n c.update(request)\n c['member'] = request.session.get('member', None)\n member_id = request.session['member']['id']\n demands = Demand.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).order_by(\"-id\")\n c['total_all'] = demands.count()\n result_list_2 = []\n for result in demands:\n result_list_2.append(countResult(result))\n c['demands'] = result_list_2\n c[type] = \"active\"\n c['type'] = type\n '''\n demands_release = demands.filter(status=StatusDemand.approved)\n demands_draft = demands.filter(status=StatusDemand.draft)\n demands_pending = demands.filter(status=StatusDemand.pending)\n demands_not_approved = demands.filter(status=StatusDemand.not_approved)\n demands_offline = demands.filter(status=StatusDemand.offline)\n demands_expired = demands.filter(expire_date__gt=datetime.datetime.today).exclude(status=StatusDemand.deleted)\n d_list = {\"release\": demands_release, \"draft\": demands_draft, \"pending\": demands_pending, \"not_approved\": demands_not_approved, \"expired\": demands_expired}\n d_list.update({\"offline\": demands_offline, \"all\": demands})\n result_list = d_list.get(type, demands)\n result_list_2 = []\n for result in result_list:\n result_list_2.append(countResult(result))\n c['result_list'] = result_list_2\n total_all = demands.count()\n total_release = demands_release.count()\n total_pending = demands_pending.count()\n total_draft = demands_draft.count()\n total_offline = demands_offline.count()\n total_not_approved = demands_not_approved.count()\n c['total_all'] = total_all\n c['total_release'] = total_release\n c['total_pending'] = total_pending\n c['total_offline'] = total_offline\n c['total_not_approved'] = total_not_approved\n c['total_draft'] = total_draft\n total_project = Project.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).count()\n c['total_project'] = total_project\n c[type] = \"active\"\n c['type'] = type\n c['demands'] = result_list_2\n '''\n return render_to_response(\"purchase/\"+request.lang+\"/mylist.html\", c, context_instance=RequestContext(request))\n\n'''\n@member_login_required\ndef mylist(request, type, id=0):\n c = {}\n c.update(request)\n c['title'] = _(\"My Purchases\")\n c['member'] = request.session.get('member', 
None)\n member_id = request.session['member']['id']\n demands = Demand.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).order_by(\"-update_time\")\n demands_public = demands.filter(target_members=None, target_companies=None, target_industries=None)\n demands_private = demands.exclude(target_members=None, target_companies=None, target_industries=None)\n demands_release = demands.filter(status=StatusDemand.approved)\n demands_draft = demands.filter(status=StatusDemand.draft)\n demands_pending = demands.filter(status=StatusDemand.pending)\n demands_not_approved = demands.filter(status=StatusDemand.not_approved)\n demands_offline = demands.filter(status=StatusDemand.offline)\n demands_expired = demands.filter(expire_date__gt=datetime.datetime.today).exclude(status=StatusDemand.deleted)\n d_list = {\"release\": demands_release, \"draft\": demands_draft, \"pending\": demands_pending, \"not_approved\": demands_not_approved, \"expired\": demands_expired}\n d_list.update({\"offline\": demands_offline, \"all\": demands, \"public\": demands_public, \"private\": demands_private})\n result_list = d_list.get(type, demands)\n total = result_list.count()\n c['total'] = total\n total_all = demands.count()\n total_public = demands_public.count()\n total_private = demands_private.count()\n total_draft = demands_draft.count()\n c['total_all'] = total_all\n c['total_public'] = total_public\n c['total_private'] = total_private\n c['total_draft'] = total_draft\n total_project = Project.objects.filter(member_id=member_id).exclude(status=StatusDemand.deleted).count()\n c['total_project'] = total_project\n if total == 0:\n return render_to_response(\"purchase/\"+request.lang+\"/mylist_empty.html\", c, context_instance=RequestContext(request))\n ids = []\n for m in result_list:\n ids.append(m.id)\n id_current = int(id)\n if id_current == 0:\n demand = result_list[0]\n id_current = demand.id\n else:\n if id_current not in ids:\n raise Http404\n pageIndex = ids.index(id_current)+1\n demand = result_list[pageIndex-1]\n pageIndex = ids.index(id_current)+1\n #c['result_list'] = result_list\n pageTotal = total\n c['pageTotal'] = pageTotal\n page_start = 1\n page_end = 10\n if pageIndex >= 5:\n page_start = pageIndex - 4\n page_end = pageIndex + 5\n if page_end > pageTotal:\n page_end = pageTotal\n pages = ids[page_start-1:page_end]\n id_list_top = enumerate(pages, start=page_start)\n id_list = enumerate(pages, start=page_start)\n c['id_list_top'] = id_list_top\n c['id_list'] = id_list\n c['page_start'] = page_start\n c['page_end'] = page_end\n c[type] = \"active\"\n c['type'] = type\n c['d'] = demand\n c['pageIndex'] = pageIndex\n c['id_current'] = id_current\n c['first_id'] = ids[0]\n c['end_id'] = ids[total-1]\n if pageIndex > 1:\n c['pre_id'] = ids[pageIndex-1]\n if pageIndex < pageTotal:\n c['next_id'] = ids[pageIndex]\n if page_end < pageTotal:\n c['next_id_page_end'] = ids[page_end]\n visitors = DemandVisitor.objects.filter(demand_id=demand.id).order_by(\"-add_time\")\n c['visitors'] = visitors\n c['visitors_count'] = visitors.count()\n followers = Favorites.objects.filter(demand_id=demand.id).order_by(\"-add_time\")\n c['followers'] = followers\n message_list = Message.objects.filter(demand_id=demand.id).order_by(\"-add_time\")\n c['message_list'] = message_list\n \n if len(demand.company_industries.all())>0:\n #之后用cv1替代\n if demand.company_industries.all()[0].level==3:\n c['deal_list_more_id']=demand.company_industries.all()[0].father.father.id\n\n elif 
demand.company_industries.all()[0].level==2:\n c['deal_list_more_id']=demand.company_industries.all()[0].father.id\n \n else:\n c['deal_list_more_id']=demand.company_industries.all()[0].id\n\n c['deal_list'] =Deal.objects.filter(cv1=c['deal_list_more_id']).order_by('-update_time')[0:10]\n\n c['compare_cn']= CompanyWithPE.objects.filter(country__name_en='China',industry__id=c['deal_list_more_id']).order_by('-ps')[0:5]\n \n c['compare_usa']= CompanyWithPE.objects.filter(country__name_en='United States of America',industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]\n \n c['compare_hk']= CompanyWithPE.objects.filter(country__name_en='Hong Kong',industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]\n \n c['compare_uk']= CompanyWithPE.objects.filter(country__name_en='United Kingdom',industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]\n\n return render_to_response(\"purchase/\"+request.lang+\"/mylist.html\", c, context_instance=RequestContext(request))\n'''\n\n@member_login_required\ndef mydetail(request, id):\n c = {}\n member = request.session.get('member', None)\n c['member'] = member\n member_id = request.session['member']['id']\n demand = get_object_or_404(Demand, pk=id, member_id=member_id)\n\n # c['d'] = demand\n m = countResult(demand)\n c['process'] = Demand.PROCESS\n c['d'] = m\n _visitors = DemandVisitor.objects.filter(demand_id=demand.id).order_by(\"-add_time\")[0:8]\n visitors=[]\n c['recommendList'] = RecommondProjectItem.objects.filter(demand=demand, project__status=StatusProject.approved).order_by(\"project__update\")[0:5]\n for v in _visitors:\n if v.member.email.find('@newchama.com')==-1:\n visitors.append(v)\n \n c['visitors'] = visitors\n c['visitors_count'] = len(visitors)\n followers = Favorites.objects.filter(demand_id=demand.id).order_by(\"-add_time\")\n c['followers'] = followers\n message_list = Message.objects.filter(demand_id=demand.id).order_by(\"-add_time\")\n c['message_list'] = message_list\n if len(demand.company_industries.all()) > 0:\n #之后用cv1替代\n if demand.company_industries.all()[0].level == 3:\n c['deal_list_more_id'] = demand.company_industries.all()[0].father.father.id\n\n elif demand.company_industries.all()[0].level == 2:\n c['deal_list_more_id'] = demand.company_industries.all()[0].father.id\n\n else:\n c['deal_list_more_id'] = demand.company_industries.all()[0].id\n\n c['deal_list'] = Deal.objects.filter(cv1=c['deal_list_more_id']).order_by('-update_time')[0:10]\n\n c['compare_cn'] = CompanyWithPE.objects.filter(country__name_en='China', industry__id=c['deal_list_more_id']).order_by('-ps')[0:5]\n\n c['compare_usa'] = CompanyWithPE.objects.filter(country__name_en='United States of America', industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]\n\n c['compare_hk'] = CompanyWithPE.objects.filter(country__name_en='Hong Kong', industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]\n\n c['compare_uk'] = CompanyWithPE.objects.filter(country__name_en='United Kingdom', industry__id=c['deal_list_more_id']).order_by('-pe')[0:5]\n return render_to_response(\"purchase/\"+request.lang+\"/mydetail.html\", c, context_instance=RequestContext(request))\n\n\ndef ajax_more(request):\n c = {}\n member = request.session.get('member', None)\n if member is None:\n return None\n member_id = request.session['member']['id']\n page = request.GET.get('page', 1)\n pagesize = request.GET.get('pagesize', 10)\n type = request.GET.get('type', 0)\n keyword = request.GET.get('keywords', '')\n country_id = request.GET.get('country_id', 0)\n province_id = 
request.GET.get('province_id', 0)\n industry_id = request.GET.get('industry_id', 0)\n sort = request.GET.get('sort', 'time_desc')\n condition = ConditionDemand()\n condition.country_id = country_id\n condition.keyword = keyword\n condition.status = StatusDemand.approved\n condition.province_id = province_id\n condition.type = type\n level = 1\n if industry_id != \"\" and industry_id != \"0\" and industry_id != 0:\n condition.industry = Industry.objects.get(pk=industry_id)\n level = condition.industry.level\n data, total = find_demands(condition, page, pagesize, sort)\n\n c['demands'] = data\n c[\"have_more_data\"] = len(data) == int(pagesize)\n c['favorites_demand_ids'] = Helper.find_member_favorite_demand_ids(member_id)\n return render_to_response(\"purchase/\"+request.lang+\"/ajax_list.html\", c, context_instance=RequestContext(request))\n\n\n@member_login_required\n@csrf_protect\ndef add(request):\n c = {}\n c.update(csrf(request))\n c['title'] = _(\"Add Purchase\")\n c['member'] = request.session.get('member', None)\n u = Demand()\n u.valid_day = 60\n # if request.method == \"POST\":\n # u = Demand()\n # name_en = request.POST[\"name_en\"]\n # name_cn = request.POST[\"name_cn\"]\n # if name_en == \"\" and name_cn == \"\":\n # isvalid = False\n # messages.warning(request, _(\"please input demand name\"))\n # submitStatus = request.POST[\"submitStatus\"]\n # redirect_url = \"purchase.mylist_pending\"\n # if submitStatus == \"draft\":\n # u.status = StatusDemand.draft\n # redirect_url = \"purchase.mylist_draft\"\n # else:\n # u.status = StatusDemand.pending\n # _bind_data(request, u)\n # if isvalid:\n # try:\n # u.financial_year = datetime.datetime.today().year\n # u.save()\n # _save_items(request, u)\n # return redirect(redirect_url)\n # except Exception, e:\n # messages.warning(request, e.message)\n # logging.error(e.message)\n c['target_companies_count'] = 0\n c[\"u\"] = u\n c['readSuitorRelate'] = False\n _load_types(c)\n return render_to_response(\"purchase/\"+request.lang+\"/add.html\", c, context_instance=RequestContext(request))\n\n\n@member_login_required\n@csrf_protect\ndef edit(request, id):\n c = {}\n c.update(csrf(request))\n c['title'] = _(\"Edit Purchase\")\n c['member'] = request.session.get('member', None)\n member_id = c['member']['id']\n isvalid = True\n u = get_object_or_404(Demand, pk=id, member_id=member_id)\n c['attachments'] = u.demand_attach.all()\n c['u'] = u\n\n c[\"other_target_companies\"] = DemandOtherTargetCompany.objects.filter(demand__id=u.id)\n countrySelected = u.company_countries.all()\n if countrySelected:\n c['company_country'] = countrySelected[0]\n provinceSelected = u.company_provinces.all()\n if provinceSelected:\n c['company_province'] = provinceSelected[0]\n industrySelected = u.demand_industries.all()\n if industrySelected:\n c['company_industry'] = industrySelected[0]\n c['target_companies_count'] = u.target_companies.all().count()\n c['readSuitorRelate'] = True\n if request.lang == \"en-us\":\n mks = u.demand_keyword_en.all()\n else:\n mks = u.demand_keyword.all()\n c['mks'] = mks\n keywords = \"\"\n if len(mks) > 0:\n for m in mks:\n keywords += m.keyword + \",\"\n keywords = keywords[0 : len(keywords) - 1]\n c['keywords'] = keywords\n _load_types(c)\n member = get_object_or_404(Member,id=member_id)\n write_demand_edit_log(request,member,u)\n return render_to_response(\"purchase/\"+request.lang+\"/add.html\", c, context_instance=RequestContext(request))\n\n\n@member_login_required\ndef detail(request, id):\n c = {}\n c['title'] = 
_(\"Purchase Detail\")\n member = request.session.get('member', None)\n c['member'] = member\n member_id = request.session['member']['id']\n if Helper.hasAgentRole(member['company_type']):\n messages.warning(request, _(\"You have no permission to visit this page\"))\n return render_to_response(\"services/error_message.html\", c, context_instance=RequestContext(request))\n demand = get_object_or_404(Demand, pk=id)\n if demand.status != StatusDemand.approved and demand.member_id != member_id:\n raise Http404\n if demand.is_suitor:\n if demand.is_push_to_member(member) is False and demand.member_id != member_id:\n messages.warning(request, _(\"not target\"))\n return render_to_response(\"services/error_message.html\", c, context_instance=RequestContext(request))\n # return HttpResponse(_(\"not target\"))\n c['d'] = demand\n #c['last_year'] = demand.financial_year-1\n\n #demands_other = Demand.objects.filter(member_id=demand.member_id, status=StatusDemand.approved, is_anonymous=False).exclude(id=id)[0:5]\n #c['demands_other'] = demands_other\n #demands_recommend = Demand.objects.filter(service_type=demand.service_type, status=StatusDemand.approved).exclude(id=id).order_by(\"-pv\")[0:5]\n #c['demands_recommend'] = demands_recommend\n c['message_list'] = Message.objects.filter(type_relation=2, demand=demand, is_delete=0).order_by('-add_time')\n if demand.member_id == member_id:\n c['is_own'] = True\n member = Member.objects.get(id=member_id)\n member.view_demand(demand)\n\n if request.lang == \"en-us\":\n mks = demand.demand_keyword_en.all()\n else:\n mks = demand.demand_keyword.all()\n\n keywords = \"\"\n if len(mks) > 0:\n for m in mks:\n keywords += m.keyword + \",\"\n keywords = keywords[0 : len(keywords) - 1]\n c['keywords'] = keywords\n\n c['is_added_favorite'] = member.is_added_demand_to_favorites(demand)\n c['is_expired']=datetime.date.today() > demand.expire_date\n\n url = \"/detail.html\"\n type = request.GET.get(\"type\", \"\")\n c['type'] = type\n if type == \"1\":\n url = \"/view.html\"\n else:\n write_demand_view_log(request,member,demand, type)\n return render_to_response(\"purchase/\"+request.lang+ url, c, context_instance=RequestContext(request))\n\n\n@member_login_required\ndef pdf(request, id):\n reload(sys)\n sys.setdefaultencoding('utf8')\n c = {}\n c['title'] = _(\"Purchase Detail\")\n c['member'] = request.session.get('member', None)\n member_id = request.session['member']['id']\n demand = get_object_or_404(Demand, pk=id)\n member = Member.objects.get(pk=member_id)\n if demand.status != StatusDemand.approved and demand.member_id != member_id:\n raise Http404\n if demand.is_suitor:\n if demand.is_push_to_member(member) is False and demand.member_id != member_id:\n return HttpResponse(_(\"not target\"))\n c['d'] = demand\n c['last_year'] = demand.financial_year-1\n c['static_root'] = settings.STATICFILES_DIRS[0]\n template = get_template(\"purchase/\"+request.lang+\"/detail_pdf.html\")\n html = template.render(Context(c))\n\n #print(html)\n file = StringIO.StringIO()\n #file = open(os.path.join(settings.MEDIA_ROOT, 'test.pdf'), \"w+b\")\n pisaStatus = pisa.CreatePDF(html, dest=file)\n\n # Return PDF document through a Django HTTP response\n file.seek(0)\n pdf = file.read()\n file.close() # Don't forget to close the file handle\n member.print_demand(demand)\n write_demand_teaser_view_log(request,member,demand)\n return HttpResponse(pdf, mimetype='application/pdf')\n\n\n@member_login_required\ndef save(request):\n response_data = {}\n response_data['result'] = 'failed'\n if 
request.method == \"POST\":\n try:\n name_en = request.POST[\"name_en\"]\n name_cn = request.POST[\"name_cn\"]\n if name_en == \"\" and name_cn == \"\":\n response_data['message'] = _(\"please input demand name\")\n else:\n #check stock_symbol is correct\n company_stock_symbol = request.POST.get(\"company_stock_symbol\", False)\n is_list_company = int(request.POST.get(\"is_list_company\", 0))\n if company_stock_symbol and is_list_company == 1:\n checksymbolExsit = ListedCompany.objects.filter(stock_symbol=company_stock_symbol)\n if len(checksymbolExsit) == 0:\n response_data['message'] = 'symbolNotExsit'\n return HttpResponse(simplejson.dumps(response_data), content_type=\"text/plain\")\n submitStatus = request.POST[\"submitStatus\"]\n u = Demand()\n isExsit = False\n id_post = request.POST.get(\"id\", False)\n #check the demand is exsit with member_id\n condition = Q(member_id=request.session[\"member\"][\"id\"])\n condition2 = Q()\n if id_post:\n condition = condition & ~Q(pk=id_post)\n if name_cn.strip() != \"\":\n condition2 = condition2 | Q(name_cn=name_cn.strip())\n\n if name_en.strip() != \"\":\n condition2 = condition2 | Q(name_en=name_en.strip())\n\n project = Demand.objects.filter(condition & condition2)\n if project:\n isExsit = True\n response_data['message'] = \"demandExsit\"\n\n if isExsit is False:\n if id_post:\n u = Demand.objects.get(pk=id_post)\n if u.status != StatusDemand.approved: #Terry mark, when the project is approved then do not reset the pending status\n if submitStatus == \"draft\":\n u.status = StatusDemand.draft\n else:\n u.status = StatusDemand.pending\n bool, msg = _bind_data(request, u)\n if bool:\n response_data['result'] = 'success'\n response_data['id'] = u.id\n response_data['message'] = '操作成功'\n else:\n response_data['message'] = msg\n\n except Exception, e:\n logger.error(e.message)\n response_data['message'] = e.message\n return HttpResponse(simplejson.dumps(response_data), content_type=\"text/plain\")\n\ndef _load_types(c):\n c[\"current_year\"] = datetime.datetime.today().year\n c[\"last_year\"] = datetime.datetime.today().year-1\n c[\"FINANCIAL_TYPES\"] = Demand.FINANCIAL_TYPES\n c[\"FINANCIAL_TYPES_2\"] = Demand.FINANCIAL_TYPES_2\n c[\"STOCK_STRUCTURE_PERCENTAGE_TYPES\"] = Demand.STOCK_STRUCTURE_PERCENTAGE_TYPES\n c[\"CURRENCY_TYPES\"] = Demand.CURRENCY_TYPES\n c[\"EMPLOYEES_COUNT_TYPES\"] = Demand.EMPLOYEES_COUNT_TYPES\n c[\"SERVICE_TYPES\"] = Demand.SERVICE_TYPES\n c[\"SERVICE_TYPES_2\"] = Demand.SERVICE_TYPES_2\n c[\"countries\"] = Helper.find_countries()\n c[\"industries\"] = Helper.find_industries_level1()\n c[\"members\"] = Member.objects.all()\n c[\"companies\"] = Company.objects.all().exclude(id=27)\n\n\ndef _bind_data(request, u):\n has_attach = False\n upload_types = request.POST.getlist(\"upload_types\", [])\n for ut in upload_types:\n uf = request.FILES.get(\"upload_file_\" + ut, False)\n if uf:\n file_ext = os.path.splitext(uf.name)[1].lower()\n if uf.size > 20000000:\n return False, \"tooBig\"\n #return _(\"The file cannot be more than 20M\")\n if file_ext != \".doc\" and file_ext != \".docx\" and file_ext != \".pdf\" and file_ext != \".ppt\" and file_ext != \".pptx\":\n return False, \"typeError\"\n #return _(\"The file must be 'doc|docx|pdf'\")\n has_attach = True\n\n integrity = 0\n u.name_cn = request.POST.get(\"name_cn\", None)\n u.name_en = request.POST.get(\"name_en\", None)\n if request.POST.get(\"name_cn\", False) or request.POST.get(\"name_en\", False):\n integrity = integrity + 1\n\n if request.POST.get(\"service_type\", 
False):\n u.service_type = request.POST[\"service_type\"]\n integrity = integrity + 1\n\n pay_currency = request.POST.get(\"pay_currency\", False)\n if pay_currency and pay_currency != \"\":\n u.pay_currency = pay_currency.replace(\",\", \"\")\n integrity = integrity + 1\n\n u.is_list_company = int(request.POST.get(\"is_list_company\", 0))\n integrity = integrity + 1\n\n project_relation = request.POST.get(\"project_relation\", False)\n if project_relation and project_relation != \"\":\n u.project_relation = project_relation.replace(\",\", \"\")\n integrity = integrity + 1\n\n valid_day = int(request.POST.get(\"valid_day\", 0))\n u.valid_day = valid_day\n\n u.expire_date = datetime.datetime.today() + datetime.timedelta(days=int(valid_day))\n integrity = integrity + 1\n\n u.is_anonymous = int(request.POST.get(\"is_anonymous\", \"0\"))\n integrity = integrity + 1\n\n exist_upload_names = request.POST.getlist(\"exist_upload_names\", [])\n if has_attach or exist_upload_names:\n integrity = integrity + 1\n u.has_attach = True\n else:\n u.has_attach = False\n\n #country\n #industry\n #project_keyword\n\n u.employees_count_type = request.POST.get(\"employees_count_type\", None)\n if request.POST.get(\"employees_count_type\", False):\n integrity = integrity + 1\n\n if request.POST.get(\"stock_structure_percentage_type_institutional\", False):\n u.stock_structure_percentage_type_institutional = request.POST[\"stock_structure_percentage_type_institutional\"]\n if request.POST.get(\"stock_structure_percentage_type_management\", False):\n u.stock_structure_percentage_type_management = request.POST[\"stock_structure_percentage_type_management\"]\n if request.POST.get(\"stock_structure_percentage_type_private\", False):\n u.stock_structure_percentage_type_private = request.POST[\"stock_structure_percentage_type_private\"]\n\n if request.POST.get(\"stock_structure_percentage_type_institutional\", False) or request.POST.get(\"stock_structure_percentage_type_institutional\", False) or request.POST.get(\"stock_structure_percentage_type_private\", False):\n integrity = integrity + 1\n\n u.currency_type_financial = request.POST.get(\"currency_type\", None)\n integrity = integrity + 1\n\n expected_enterprice_value_enter = request.POST.get(\"expected_enterprice_value_enter\", False)\n if expected_enterprice_value_enter and expected_enterprice_value_enter != \"\":\n u.expected_enterprice_value_enter = expected_enterprice_value_enter.replace(\",\", \"\")\n integrity = integrity + 1\n\n #new column\n stock_percent = request.POST.get(\"stock_percent\", False)\n if stock_percent and stock_percent != \"\":\n u.stock_percent = stock_percent\n integrity = integrity + 1\n\n deal_size_enter = request.POST.get(\"deal_size_enter\", False)\n if deal_size_enter and deal_size_enter != \"\":\n u.deal_size_enter = deal_size_enter.replace(\",\", \"\")\n integrity = integrity + 1\n\n income_last_phase_enter = request.POST.get(\"income_last_phase_enter\", False)\n if income_last_phase_enter and income_last_phase_enter != \"\":\n u.income_last_phase_enter = income_last_phase_enter.replace(\",\", \"\")\n integrity = integrity + 1\n\n profit_last_phase_enter = request.POST.get(\"profit_last_phase_enter\", False)\n if profit_last_phase_enter and profit_last_phase_enter != \"\":\n u.profit_last_phase_enter = profit_last_phase_enter.replace(\",\", \"\")\n integrity = integrity + 1\n\n ebitda = request.POST.get(\"ebitda\", False)\n if ebitda and ebitda != \"\":\n u.ebitda = ebitda.replace(\",\", \"\")\n integrity = integrity + 1\n\n 
u.audit_status = int(request.POST.get(\"audit_status\", 0))\n integrity = integrity + 1\n\n u.process = request.POST.get(\"process\", 0)\n\n '''\n no input start\n '''\n member_id = request.session[\"member\"][\"id\"]\n if member_id != \"0\" and member_id != \"\":\n u.member = Member.objects.get(pk=member_id)\n u.business_cn = request.POST.get(\"business_cn\", None)\n u.business_en = request.POST.get(\"business_en\", None)\n # if request.POST.get(\"business_cn\", False) or request.POST.get(\"business_en\", False):\n # integrity = integrity + 1\n u.company_stock_symbol = request.POST.get(\"company_stock_symbol\", None)\n #u.company_symbol = request.POST[\"company_symbol\"]\n\n financial_is_must_audit = int(request.POST.get(\"financial_is_must_audit\", 0))\n u.financial_is_must_audit = financial_is_must_audit\n if financial_is_must_audit == 1:\n u.financial_audit_company_is_must_default = True\n elif financial_is_must_audit == 2:\n u.financial_audit_company_is_must_default = False\n\n # if request.POST[\"growth_three_year\"] != \"\":\n # u.growth_three_year = request.POST[\"growth_three_year\"]\n # integrity = integrity + 1\n\n deal_size = request.POST.get(\"deal_size\", False)\n if deal_size and deal_size != \"\":\n u.deal_size = deal_size\n # integrity = integrity + 1\n\n if request.POST.get(\"income\", False):\n u.income = request.POST[\"income\"]\n if request.POST.get(\"income_last_phase\", False):\n u.income_last_phase = request.POST[\"income_last_phase\"]\n # integrity = integrity + 1\n u.intro_cn = request.POST.get(\"intro_cn\", None)\n u.intro_en = request.POST.get(\"intro_en\", None)\n # if request.POST.get(\"intro_cn\", False) or request.POST.get(\"intro_en\", False):\n # integrity = integrity + 1\n u.is_suitor = int(request.POST.get(\"is_suitor\", \"0\"))\n # u.net_assets = request.POST[\"net_assets\"]\n if request.POST.get(\"profit\", False):\n u.profit = request.POST[\"profit\"]\n if request.POST.get(\"profit_last_phase\", False):\n u.profit_last_phase = request.POST[\"profit_last_phase\"]\n if request.POST.get(\"registered_capital\", False):\n u.registered_capital = request.POST[\"registered_capital\"]\n\n total_assets_last_phase = request.POST.get(\"total_assets_last_phase\", False)\n if total_assets_last_phase and total_assets_last_phase != \"\":\n u.total_assets_last_phase = total_assets_last_phase.replace(\",\", \"\")\n\n # u.remark_cn = request.POST[\"remark_cn\"]\n # u.remark_en = request.POST[\"remark_en\"]\n # u.financial_audit_company_name = request.POST[\"financial_audit_company_name\"]\n if request.POST.get(\"expected_enterprice_value\", False):\n u.expected_enterprice_value = request.POST[\"expected_enterprice_value\"]\n # integrity = integrity + 1\n name_project_cn = request.POST.get(\"name_project_cn\", False)\n if name_project_cn:\n u.name_project_cn = name_project_cn\n else:\n u.name_project_cn = \"\"\n\n name_project_en = request.POST.get(\"name_project_en\", False)\n if name_project_en:\n u.name_project_en = name_project_en\n else:\n u.name_project_en = \"\"\n # if request.POST.get(\"name_project_cn\", False) or request.POST.get(\"name_project_en\", False):\n # integrity = integrity + 1\n\n project_stage = request.POST.get(\"project_stage\", False)\n if project_stage and project_stage != \"\":\n u.project_stage = project_stage.replace(\",\", \"\")\n\n pay_way = request.POST.get(\"pay_way\", False)\n if pay_way and pay_way != \"\":\n u.pay_way = pay_way.replace(\",\", \"\")\n\n income_enter = request.POST.get(\"income_enter\", False)\n if income_enter and 
income_enter != \"\":\n u.income_enter = income_enter.replace(\",\", \"\")\n\n profit_enter = request.POST.get(\"profit_enter\", False)\n if profit_enter and profit_enter != \"\":\n u.profit_enter = profit_enter.replace(\",\", \"\")\n\n # if request.POST.get(\"income\", False) or request.POST.get(\"income_enter\", False):\n # integrity = integrity + 1\n #\n # if request.POST.get(\"profit\", False) or request.POST.get(\"profit_enter\", False):\n # integrity = integrity + 1\n\n total_assets = request.POST.get(\"total_assets\", False)\n if total_assets and total_assets != \"\":\n u.total_assets = total_assets.replace(\",\", \"\")\n # integrity = integrity + 1\n\n total_profit = request.POST.get(\"total_profit\", False)\n if total_profit and total_profit != \"\":\n u.total_profit = total_profit.replace(\",\", \"\")\n # integrity = integrity + 1\n '''\n no input end\n '''\n\n #new column end\n u.save()\n\n u.demand_attach.all().delete()\n exist_upload_names = request.POST.getlist(\"exist_upload_names\", [])\n exist_upload_newNames = request.POST.getlist(\"exist_upload_newNames\", [])\n upload_type_names = request.POST.getlist(\"upload_type_names\", [])\n upload_types = request.POST.getlist(\"upload_types\", [])\n for ut, tn in zip(upload_types, upload_type_names):\n uf = request.FILES.get(\"upload_file_\" + ut, False)\n if uf:\n # if uf.size > 2000000:\n # messages.error(request, _(\"The file cannot be more than 2M\"))\n # return\n da = DemandAttach()\n da.demand = u\n da.file_name = uf.name\n da.file_type = ut\n da.file_type_name = tn\n da.new_name = _upload_project_file(uf)\n da.save()\n else:\n for t, f, n in zip(upload_types, exist_upload_names, exist_upload_newNames): #if upload not exsit, check the file that has already exsit file\n if t == ut:\n da = DemandAttach()\n da.demand = u\n da.file_name = f\n da.file_type = ut\n da.file_type_name = tn\n da.new_name = n\n da.save()\n break\n\n countries_ids = request.POST.getlist(\"country\", [])\n if countries_ids is not None:\n integrity = integrity + 1\n for id in countries_ids:\n if id != \"0\" and id != \"\":\n u.company_countries = countries_ids\n\n provinces_ids = request.POST.getlist(\"province\", [])\n if provinces_ids is not None:\n for id in provinces_ids:\n if id != \"0\" and id != \"\":\n c = Province.objects.get(pk=id)\n u.company_provinces.add(c)\n targetCompanies = request.POST.getlist(\"target_companies\", [])\n if targetCompanies:\n u.target_companies = request.POST.getlist(\"target_companies\", [])\n\n # industries_ids = request.POST.getlist(\"industry\", [])\n industries_ids = request.POST.getlist(\"industry_id\", [])\n u.company_industries.clear()\n DemandIndustry.objects.filter(demand_id=u.id).delete();\n if industries_ids is not None:\n integrity = integrity + 1\n for id in industries_ids:\n if id != \"0\" and id != \"\":\n c = Industry.objects.get(pk=id)\n u.company_industries.add(c)\n di = DemandIndustry()\n di.demand = u\n if c.level == 3:\n di.cv3 = c.id\n di.cv2 = c.father_id\n di.cv1 = c.father.father_id\n elif c.level == 2:\n di.cv2 = c.id\n di.cv1 = c.father_id\n else:\n di.cv1 = c.id\n di.save()\n\n demand_keyword = request.POST.get(\"project_keyword\", False)\n if request.lang == \"en-us\":\n u.demand_keyword_en.all().delete()\n else:\n u.demand_keyword.all().delete()\n if demand_keyword:\n integrity = integrity + 1\n mks = demand_keyword.split(\",\")\n for m in mks:\n if request.lang == \"en-us\":\n k = DemandKeywordEn()\n else:\n k = DemandKeyword()\n k.keyword = m\n k.demand = u\n k.save()\n\n integrity = 
int(integrity * 100 / 21)\n if request.lang == \"zh-cn\":\n u.integrity = integrity\n else:\n u.integrity_en = integrity\n u.save()\n\n return True, \"ok\"\n\ndef _clear_items(u):\n u.company_countries.clear()\n u.company_industries.clear()\n u.company_provinces.clear()\n u.target_members.clear()\n u.target_companies.clear()\n u.target_industries.clear()\n\n@csrf_exempt\ndef delete(request):\n msg = \"\"\n if request.method == 'POST':\n id = request.POST[\"id\"]\n member = request.session.get('member', None)\n member_id = request.session['member']['id']\n if member is None:\n msg = \"nologon\"\n else:\n try:\n member=get_object_or_404(Member,id=member_id)\n \n d=Demand.objects.get(pk=id, member_id=member_id)\n d.status=StatusDemand.deleted\n d.save()\n\n # terry 20150204 remark\n write_demand_delete_log(request, member, d)\n\n msg = \"success\"\n except Exception, e:\n msg = e.message\n return HttpResponse(msg)\n\n\n@csrf_exempt\ndef offline(request):\n msg = \"\"\n if request.method == 'POST':\n id = request.POST[\"id\"]\n member = request.session.get('member', None)\n member_id = request.session['member']['id']\n if member is None:\n msg = \"nologon\"\n else:\n try:\n member=get_object_or_404(Member,id=member_id)\n\n d=Demand.objects.get(pk=id, member_id=member_id)\n d.status=StatusDemand.offline\n d.save()\n\n write_demand_offline_log(request, member, d)\n \n\n msg = \"success\"\n except Exception, e:\n msg = e.message\n return HttpResponse(msg)\n\n\n@csrf_exempt\n@member_login_required\ndef get_list_for_home(request):\n c = {}\n if request.method == 'GET':\n try:\n type = request.GET.get(\"type\", \"\")\n id = request.GET.get(\"id\", \"\")\n q1 = Q(status=StatusDemand.approved)\n if type == \"industry\":\n q2 = Q(company_industries=None) | Q(company_industries=id)\n else:\n q2 = Q(company_countries=None, company_provinces=None, company_cities=None)\n location = request.GET.get(\"location\", \"\")\n if location == \"city\":\n q2 = q2 | Q(company_cities=id)\n elif location == \"province\":\n q2 = q2 | Q(company_provinces=id)\n else:\n q2 = q2 | Q(company_countries=id)\n if len(q2) > 0:\n q1 = q1 & q2\n demands = Demand.objects.filter(q1).order_by(\"-id\")[0:10]\n c['data'] = demands\n except Exception, e:\n logger.error(e.message)\n return render_to_response(\"purchase/\"+request.lang+\"/list_for_home.html\", c, context_instance=RequestContext(request))\n\n\n@member_login_required\ndef download_attach(request, id):\n reload(sys)\n sys.setdefaultencoding('utf8')\n demand = get_object_or_404(Demand, pk=id)\n member_id = request.session['member']['id']\n member = Member.objects.get(pk=member_id)\n if demand.status != StatusDemand.approved and demand.member_id != member_id:\n raise Http404\n if demand.demand_attach.count() == 0:\n return HttpResponse(_(\"no attach\"))\n if demand.is_suitor:\n if demand.is_push_to_member(member) is False and demand.member_id != member_id:\n return HttpResponse(_(\"not target\"))\n path = settings.MEDIA_ROOT + \"/demand/\"\n #please view: http://stackoverflow.com/questions/12881294/django-create-a-zip-of-multiple-files-and-make-it-downloadable\n # Files (local path) to put in the .zip\n # FIXME: Change this (get paths from DB etc)\n #filenames = [\"/tmp/file1.txt\", \"/tmp/file2.txt\"]\n filenames = []\n for attach in demand.demand_attach.all():\n filenames.append(path+attach.new_name+\"/\"+attach.file_name)\n # Folder name in ZIP archive which contains the above files\n # E.g [thearchive.zip]/somefiles/file2.txt\n # FIXME: Set this to something better\n 
#zip_subdir = \"somefiles\"\n zip_subdir = demand.name_cn\n if request.lang == \"en-us\":\n zip_subdir = demand.name_en\n zip_filename = \"%s.zip\" % zip_subdir\n # Open StringIO to grab in-memory ZIP contents\n s = StringIO.StringIO()\n\n # The zip compressor\n zf = zipfile.ZipFile(s, \"w\")\n for fpath in filenames:\n # Calculate path for file in zip\n #fdir, fname = os.path.split(fpath)\n fnewname, fname = os.path.split(fpath)\n if os.path.isfile(fnewname) is False:\n break\n #zip_path = os.path.join(zip_subdir, fname)\n zip_path = os.path.join(zip_subdir, fname)\n # Add file, at correct path\n #zf.write(fpath, zip_path)\n zf.write(fnewname, zip_path)\n\n # Must close zip for all contents to be written\n zf.close()\n # Grab ZIP file from in-memory, make response with correct MIME-type\n resp = HttpResponse(s.getvalue(), content_type=\"application/x-zip-compressed\")\n # ..and correct content-disposition\n resp['Content-Disposition'] = 'attachment; filename=%s' % zip_filename.encode(\"utf8\")\n\n member.download_demand_attach(demand)\n return resp\n\n\ndef _upload_project_file(f):\n file_name = \"\"\n path = settings.MEDIA_ROOT + \"/demand/\"\n try:\n if not os.path.exists(path):\n os.makedirs(path)\n file_ext = os.path.splitext(f.name)[1]\n random_no = str(random.randint(0, 99999999)).zfill(8)\n # print random_no\n file_name = random_no + file_ext\n destination = open(path + file_name, 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n except Exception, e:\n logger.error(e.message)\n # print e\n return file_name\n\n\ndef find_demands(condition, page, pagesize, sort):\n demands = Demand.objects.all()\n if condition.keyword != \"\":\n demands = demands.filter(Q(name_cn__contains=condition.keyword) | Q(name_en__contains=condition.keyword))\n if condition.type != \"\" and condition.type != \"0\" and condition.type != 0:\n demands = demands.filter(service_type=condition.type)\n if condition.country_id != 0 and condition.country_id != \"\" and condition.country_id != \"0\":\n demands = demands.filter(Q(company_countries__id=condition.country_id)) #Q(company_countries=None) |\n if condition.province_id != 0 and condition.province_id != \"\" and condition.province_id != \"0\":\n demands = demands.filter(Q(company_provinces__id=condition.province_id)) #Q(company_provinces=None) |\n if condition.industry is not None:\n demands = demands.filter(Q(company_industries=condition.industry) | Q(company_industries__father=condition.industry) | Q(company_industries__father__father=condition.industry)) #Q(company_industries=None) |\n if condition.member_id != 0 and condition.member_id != \"\":\n demands = demands.filter(member_id=condition.member_id)\n if condition.status != -1 and condition.status != \"\":\n demands = demands.filter(status=condition.status)\n if page <= 1:\n page = 1\n if pagesize <= 1:\n pagesize = 1\n start_record = (int(page)-1) * int(pagesize)\n end_record = int(start_record) + int(pagesize)\n if sort == \"\":\n sort = \"time_desc\"\n if sort == \"time_desc\":\n demands = demands.order_by(\"-id\")\n elif sort == \"time_asc\":\n demands = demands.order_by(\"id\")\n elif sort == \"size_desc\":\n demands = demands.order_by(\"-deal_size\")\n elif sort == \"size_asc\":\n demands = demands.order_by(\"deal_size\")\n elif sort == \"hot_desc\":\n demands = demands.order_by(\"-pv\")\n\n total = demands.count()\n data = demands[start_record:end_record]\n return data, total\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from __future__ import print_function
import os, sys, time
import fitz
import PySimpleGUI as sg
"""
PyMuPDF utility
----------------
For a given entry in a page's getImageList() list, function "recoverpix"
returns either the raw image data, or a modified pixmap if an /SMask entry
exists.
The item's first two entries are PDF xref numbers. The first one is the image in
question, the second one may be 0 or the object id of a soft mask. In that
case, we assume it is a sequence of alpha bytes belonging to our image.
We then create a new Pixmap giving it these alpha values, and return it.
If the result pixmap is CMYK, it will be converted to RGB first.
"""
print(fitz.__doc__)
if not tuple(map(int, fitz.version[0].split("."))) >= (1, 13, 17):
raise SystemExit("require PyMuPDF v1.13.17+")
dimlimit = 100 # each image side must be greater than this
relsize = 0.05 # image : pixmap size ratio must be larger than this (5%)
abssize = 2048 # absolute image size limit 2 KB: ignore if smaller
imgdir = "images" # found images are stored in this subfolder
if not os.path.exists(imgdir):
os.mkdir(imgdir)
def recoverpix(doc, item):
x = item[0] # xref of PDF image
s = item[1] # xref of its /SMask
if s == 0: # no smask: use direct image output
return doc.extractImage(x)
def getimage(pix):
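        # pass non-CMYK pixmaps through unchanged; convert 4-component
        # (CMYK) pixmaps to RGB so they can be written out as PNG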
if pix.colorspace.n != 4:
return pix
tpix = fitz.Pixmap(fitz.csRGB, pix)
return tpix
# we need to reconstruct the alpha channel with the smask
pix1 = fitz.Pixmap(doc, x)
pix2 = fitz.Pixmap(doc, s) # create pixmap of the /SMask entry
# sanity check
if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and pix2.n == 1):
pix2 = None
return getimage(pix1)
pix = fitz.Pixmap(pix1) # copy of pix1, alpha channel added
pix.setAlpha(pix2.samples) # treat pix2.samples as alpha value
pix1 = pix2 = None # free temp pixmaps
# we may need to adjust something for CMYK pixmaps here:
return getimage(pix)
fname = sys.argv[1] if len(sys.argv) == 2 else None
if not fname:
fname = sg.PopupGetFile("Select file:", title="PyMuPDF PDF Image Extraction")
if not fname:
raise SystemExit()
t0 = time.time()
doc = fitz.open(fname)
page_count = len(doc) # number of pages
xreflist = []
imglist = []
for pno in range(page_count):
sg.QuickMeter(
"Extract Images", # show our progress
pno + 1,
page_count,
"*** Scanning Pages ***",
)
il = doc.getPageImageList(pno)
imglist.extend([x[0] for x in il])
for img in il:
xref = img[0]
if xref in xreflist:
continue
width = img[2]
height = img[3]
if min(width, height) <= dimlimit:
continue
pix = recoverpix(doc, img)
if type(pix) is dict: # we got a raw image
ext = pix["ext"]
imgdata = pix["image"]
n = pix["colorspace"]
imgfile = os.path.join(imgdir, "img-%i.%s" % (xref, ext))
else: # we got a pixmap
imgfile = os.path.join(imgdir, "img-%i.png" % xref)
n = pix.n
imgdata = pix.getPNGData()
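        # skip images whose encoded data is no larger than abssize bytes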
if len(imgdata) <= abssize:
continue
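        # skip images whose data-to-raw-pixel-buffer ratio is at most relsize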
if len(imgdata) / (width * height * n) <= relsize:
continue
fout = open(imgfile, "wb")
fout.write(imgdata)
fout.close()
xreflist.append(xref)
t1 = time.time()
imglist = list(set(imglist))
print(len(set(imglist)), "images in total")
print(len(xreflist), "images extracted")
print("total time %g sec" % (t1 - t0))
|
normal
|
{
"blob_id": "856afd30a2ed01a1d44bbe91a7b69998e9a51bb7",
"index": 3170,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef recoverpix(doc, item):\n x = item[0]\n s = item[1]\n if s == 0:\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s)\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and \n pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n pix = fitz.Pixmap(pix1)\n pix.setAlpha(pix2.samples)\n pix1 = pix2 = None\n return getimage(pix)\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint(fitz.__doc__)\nif not tuple(map(int, fitz.version[0].split('.'))) >= (1, 13, 17):\n raise SystemExit('require PyMuPDF v1.13.17+')\ndimlimit = 100\nrelsize = 0.05\nabssize = 2048\nimgdir = 'images'\nif not os.path.exists(imgdir):\n os.mkdir(imgdir)\n\n\ndef recoverpix(doc, item):\n x = item[0]\n s = item[1]\n if s == 0:\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s)\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and \n pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n pix = fitz.Pixmap(pix1)\n pix.setAlpha(pix2.samples)\n pix1 = pix2 = None\n return getimage(pix)\n\n\nfname = sys.argv[1] if len(sys.argv) == 2 else None\nif not fname:\n fname = sg.PopupGetFile('Select file:', title=\n 'PyMuPDF PDF Image Extraction')\nif not fname:\n raise SystemExit()\nt0 = time.time()\ndoc = fitz.open(fname)\npage_count = len(doc)\nxreflist = []\nimglist = []\nfor pno in range(page_count):\n sg.QuickMeter('Extract Images', pno + 1, page_count,\n '*** Scanning Pages ***')\n il = doc.getPageImageList(pno)\n imglist.extend([x[0] for x in il])\n for img in il:\n xref = img[0]\n if xref in xreflist:\n continue\n width = img[2]\n height = img[3]\n if min(width, height) <= dimlimit:\n continue\n pix = recoverpix(doc, img)\n if type(pix) is dict:\n ext = pix['ext']\n imgdata = pix['image']\n n = pix['colorspace']\n imgfile = os.path.join(imgdir, 'img-%i.%s' % (xref, ext))\n else:\n imgfile = os.path.join(imgdir, 'img-%i.png' % xref)\n n = pix.n\n imgdata = pix.getPNGData()\n if len(imgdata) <= abssize:\n continue\n if len(imgdata) / (width * height * n) <= relsize:\n continue\n fout = open(imgfile, 'wb')\n fout.write(imgdata)\n fout.close()\n xreflist.append(xref)\nt1 = time.time()\nimglist = list(set(imglist))\nprint(len(set(imglist)), 'images in total')\nprint(len(xreflist), 'images extracted')\nprint('total time %g sec' % (t1 - t0))\n",
"step-4": "from __future__ import print_function\nimport os, sys, time\nimport fitz\nimport PySimpleGUI as sg\n<mask token>\nprint(fitz.__doc__)\nif not tuple(map(int, fitz.version[0].split('.'))) >= (1, 13, 17):\n raise SystemExit('require PyMuPDF v1.13.17+')\ndimlimit = 100\nrelsize = 0.05\nabssize = 2048\nimgdir = 'images'\nif not os.path.exists(imgdir):\n os.mkdir(imgdir)\n\n\ndef recoverpix(doc, item):\n x = item[0]\n s = item[1]\n if s == 0:\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s)\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and \n pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n pix = fitz.Pixmap(pix1)\n pix.setAlpha(pix2.samples)\n pix1 = pix2 = None\n return getimage(pix)\n\n\nfname = sys.argv[1] if len(sys.argv) == 2 else None\nif not fname:\n fname = sg.PopupGetFile('Select file:', title=\n 'PyMuPDF PDF Image Extraction')\nif not fname:\n raise SystemExit()\nt0 = time.time()\ndoc = fitz.open(fname)\npage_count = len(doc)\nxreflist = []\nimglist = []\nfor pno in range(page_count):\n sg.QuickMeter('Extract Images', pno + 1, page_count,\n '*** Scanning Pages ***')\n il = doc.getPageImageList(pno)\n imglist.extend([x[0] for x in il])\n for img in il:\n xref = img[0]\n if xref in xreflist:\n continue\n width = img[2]\n height = img[3]\n if min(width, height) <= dimlimit:\n continue\n pix = recoverpix(doc, img)\n if type(pix) is dict:\n ext = pix['ext']\n imgdata = pix['image']\n n = pix['colorspace']\n imgfile = os.path.join(imgdir, 'img-%i.%s' % (xref, ext))\n else:\n imgfile = os.path.join(imgdir, 'img-%i.png' % xref)\n n = pix.n\n imgdata = pix.getPNGData()\n if len(imgdata) <= abssize:\n continue\n if len(imgdata) / (width * height * n) <= relsize:\n continue\n fout = open(imgfile, 'wb')\n fout.write(imgdata)\n fout.close()\n xreflist.append(xref)\nt1 = time.time()\nimglist = list(set(imglist))\nprint(len(set(imglist)), 'images in total')\nprint(len(xreflist), 'images extracted')\nprint('total time %g sec' % (t1 - t0))\n",
"step-5": "from __future__ import print_function\nimport os, sys, time\nimport fitz\nimport PySimpleGUI as sg\n\n\"\"\"\nPyMuPDF utility\n----------------\nFor a given entry in a page's getImagleList() list, function \"recoverpix\"\nreturns either the raw image data, or a modified pixmap if an /SMask entry\nexists.\nThe item's first two entries are PDF xref numbers. The first one is the image in\nquestion, the second one may be 0 or the object id of a soft-image mask. In this\ncase, we assume it being a sequence of alpha bytes belonging to our image.\nWe then create a new Pixmap giving it these alpha values, and return it.\nIf the result pixmap is CMYK, it will be converted to RGB first.\n\"\"\"\nprint(fitz.__doc__)\n\nif not tuple(map(int, fitz.version[0].split(\".\"))) >= (1, 13, 17):\n raise SystemExit(\"require PyMuPDF v1.13.17+\")\n\ndimlimit = 100 # each image side must be greater than this\nrelsize = 0.05 # image : pixmap size ratio must be larger than this (5%)\nabssize = 2048 # absolute image size limit 2 KB: ignore if smaller\nimgdir = \"images\" # found images are stored in this subfolder\n\nif not os.path.exists(imgdir):\n os.mkdir(imgdir)\n\n\ndef recoverpix(doc, item):\n x = item[0] # xref of PDF image\n s = item[1] # xref of its /SMask\n if s == 0: # no smask: use direct image output\n return doc.extractImage(x)\n\n def getimage(pix):\n if pix.colorspace.n != 4:\n return pix\n tpix = fitz.Pixmap(fitz.csRGB, pix)\n return tpix\n\n # we need to reconstruct the alpha channel with the smask\n pix1 = fitz.Pixmap(doc, x)\n pix2 = fitz.Pixmap(doc, s) # create pixmap of the /SMask entry\n\n # sanity check\n if not (pix1.irect == pix2.irect and pix1.alpha == pix2.alpha == 0 and pix2.n == 1):\n pix2 = None\n return getimage(pix1)\n\n pix = fitz.Pixmap(pix1) # copy of pix1, alpha channel added\n pix.setAlpha(pix2.samples) # treat pix2.samples as alpha value\n pix1 = pix2 = None # free temp pixmaps\n\n # we may need to adjust something for CMYK pixmaps here:\n return getimage(pix)\n\n\nfname = sys.argv[1] if len(sys.argv) == 2 else None\nif not fname:\n fname = sg.PopupGetFile(\"Select file:\", title=\"PyMuPDF PDF Image Extraction\")\nif not fname:\n raise SystemExit()\n\nt0 = time.time()\ndoc = fitz.open(fname)\n\npage_count = len(doc) # number of pages\n\nxreflist = []\nimglist = []\nfor pno in range(page_count):\n sg.QuickMeter(\n \"Extract Images\", # show our progress\n pno + 1,\n page_count,\n \"*** Scanning Pages ***\",\n )\n\n il = doc.getPageImageList(pno)\n imglist.extend([x[0] for x in il])\n for img in il:\n xref = img[0]\n if xref in xreflist:\n continue\n width = img[2]\n height = img[3]\n if min(width, height) <= dimlimit:\n continue\n pix = recoverpix(doc, img)\n if type(pix) is dict: # we got a raw image\n ext = pix[\"ext\"]\n imgdata = pix[\"image\"]\n n = pix[\"colorspace\"]\n imgfile = os.path.join(imgdir, \"img-%i.%s\" % (xref, ext))\n else: # we got a pixmap\n imgfile = os.path.join(imgdir, \"img-%i.png\" % xref)\n n = pix.n\n imgdata = pix.getPNGData()\n\n if len(imgdata) <= abssize:\n continue\n\n if len(imgdata) / (width * height * n) <= relsize:\n continue\n\n fout = open(imgfile, \"wb\")\n fout.write(imgdata)\n fout.close()\n xreflist.append(xref)\n\nt1 = time.time()\nimglist = list(set(imglist))\nprint(len(set(imglist)), \"images in total\")\nprint(len(xreflist), \"images extracted\")\nprint(\"total time %g sec\" % (t1 - t0))",
"step-ids": [
0,
1,
3,
4,
5
]
}
|
[
0,
1,
3,
4,
5
] |
"""
Have the function CharlietheDog(strArr) read the array of strings stored in strArr which
will be a 4x4 matrix of the characters 'C', 'H', 'F', 'O', where C represents Charlie the dog,
 H represents its home, F represents dog food, and O represents an empty space in the grid. 
Your goal is to figure out the least amount of moves required to get Charlie to grab each piece of food in the grid by moving
up, down, left, or right, and then make it home right after.
Charlie cannot move onto the home before all pieces of food have been collected.
For example: if strArr is ["FOOF", "OCOO", "OOOH", "FOOO"], then this looks like the following grid:
F O O F
O C O O
O O O H
F O O O
For the input above, the least amount of steps where the dog can reach each piece of food,
and then return home is 11 steps, so your program should return the number 11.
The grid will always contain between 1 and 8 pieces of food.
Use the Parameter Testing feature in the box below to test your code with different arguments.
"""
from itertools import permutations
def CharlietheDog(strArr):
def walk(food_home, dog, matriz, steps=0):
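        # recursively move the dog one cell at a time toward the next target
        # (at most one horizontal and one vertical step per call), counting
        # steps; once a target is reached it is dropped from food_home, and
        # the walk ends when the list (food, then home) is exhausted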
food_home_dx = food_home[0][0] - dog[0]
food_home_dy = food_home[0][1] - dog[1]
walk_x = food_home_dx/(abs(food_home_dx) if food_home_dx != 0 else 1)
walk_y = food_home_dy/(abs(food_home_dy) if food_home_dy != 0 else 1)
steps += abs(walk_x) + abs(walk_y)
dog = (dog[0] + walk_x, dog[1] + walk_y)
if food_home[0] == dog:
food_home = food_home[1:]
food_home_size = len(food_home)
if food_home_size <= 0:
return steps
return walk(food_home, dog, matriz, steps)
food = []
home = None
dog = None
for i in range(len(strArr)):
for j in range(len(strArr[i])):
if strArr[i][j] == 'F':
food.append((i, j))
if strArr[i][j] == 'H':
home = (i, j)
if strArr[i][j] == 'C':
dog = (i, j)
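    # brute force: walk every visiting order of the food pieces, with home
    # appended as the final stop, and keep the cheapest total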
foods = permutations(food)
min_steps = None
for food in foods:
food_home = food + (home, )
steps = walk(food_home, dog, strArr)
if min_steps == None or steps < min_steps:
min_steps = steps
return int(min_steps)
# keep this function call here
print (CharlietheDog(raw_input()))
|
normal
|
{
"blob_id": "731110b02c8a09dc84042a99c14eef990ae33cd2",
"index": 5913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef CharlietheDog(strArr):\n\n def walk(food_home, dog, matriz, steps=0):\n food_home_dx = food_home[0][0] - dog[0]\n food_home_dy = food_home[0][1] - dog[1]\n walk_x = food_home_dx / (abs(food_home_dx) if food_home_dx != 0 else 1)\n walk_y = food_home_dy / (abs(food_home_dy) if food_home_dy != 0 else 1)\n steps += abs(walk_x) + abs(walk_y)\n dog = dog[0] + walk_x, dog[1] + walk_y\n if food_home[0] == dog:\n food_home = food_home[1:]\n food_home_size = len(food_home)\n if food_home_size <= 0:\n return steps\n return walk(food_home, dog, matriz, steps)\n food = []\n home = None\n dog = None\n for i in range(len(strArr)):\n for j in range(len(strArr[i])):\n if strArr[i][j] == 'F':\n food.append((i, j))\n if strArr[i][j] == 'H':\n home = i, j\n if strArr[i][j] == 'C':\n dog = i, j\n foods = permutations(food)\n min_steps = None\n for food in foods:\n food_home = food + (home,)\n steps = walk(food_home, dog, strArr)\n if min_steps == None or steps < min_steps:\n min_steps = steps\n return int(min_steps)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef CharlietheDog(strArr):\n\n def walk(food_home, dog, matriz, steps=0):\n food_home_dx = food_home[0][0] - dog[0]\n food_home_dy = food_home[0][1] - dog[1]\n walk_x = food_home_dx / (abs(food_home_dx) if food_home_dx != 0 else 1)\n walk_y = food_home_dy / (abs(food_home_dy) if food_home_dy != 0 else 1)\n steps += abs(walk_x) + abs(walk_y)\n dog = dog[0] + walk_x, dog[1] + walk_y\n if food_home[0] == dog:\n food_home = food_home[1:]\n food_home_size = len(food_home)\n if food_home_size <= 0:\n return steps\n return walk(food_home, dog, matriz, steps)\n food = []\n home = None\n dog = None\n for i in range(len(strArr)):\n for j in range(len(strArr[i])):\n if strArr[i][j] == 'F':\n food.append((i, j))\n if strArr[i][j] == 'H':\n home = i, j\n if strArr[i][j] == 'C':\n dog = i, j\n foods = permutations(food)\n min_steps = None\n for food in foods:\n food_home = food + (home,)\n steps = walk(food_home, dog, strArr)\n if min_steps == None or steps < min_steps:\n min_steps = steps\n return int(min_steps)\n\n\nprint(CharlietheDog(raw_input()))\n",
"step-4": "<mask token>\nfrom itertools import permutations\n\n\ndef CharlietheDog(strArr):\n\n def walk(food_home, dog, matriz, steps=0):\n food_home_dx = food_home[0][0] - dog[0]\n food_home_dy = food_home[0][1] - dog[1]\n walk_x = food_home_dx / (abs(food_home_dx) if food_home_dx != 0 else 1)\n walk_y = food_home_dy / (abs(food_home_dy) if food_home_dy != 0 else 1)\n steps += abs(walk_x) + abs(walk_y)\n dog = dog[0] + walk_x, dog[1] + walk_y\n if food_home[0] == dog:\n food_home = food_home[1:]\n food_home_size = len(food_home)\n if food_home_size <= 0:\n return steps\n return walk(food_home, dog, matriz, steps)\n food = []\n home = None\n dog = None\n for i in range(len(strArr)):\n for j in range(len(strArr[i])):\n if strArr[i][j] == 'F':\n food.append((i, j))\n if strArr[i][j] == 'H':\n home = i, j\n if strArr[i][j] == 'C':\n dog = i, j\n foods = permutations(food)\n min_steps = None\n for food in foods:\n food_home = food + (home,)\n steps = walk(food_home, dog, strArr)\n if min_steps == None or steps < min_steps:\n min_steps = steps\n return int(min_steps)\n\n\nprint(CharlietheDog(raw_input()))\n",
"step-5": "\"\"\"\nHave the function CharlietheDog(strArr) read the array of strings stored in strArr which \nwill be a 4x4 matrix of the characters 'C', 'H', 'F', 'O', where C represents Charlie the dog,\n H represents its home, F represents dog food, and O represents and empty space in the grid. \n Your goal is to figure out the least amount of moves required to get Charlie to grab each piece of food in the grid by moving\n up, down, left, or right, and then make it home right after. \n Charlie cannot move onto the home before all pieces of food have been collected. \n For example: if strArr is [\"FOOF\", \"OCOO\", \"OOOH\", \"FOOO\"], then this looks like the following grid: \n \n F O O F\n O C O O\n O O O H\n F O O O \n\nFor the input above, the least amount of steps where the dog can reach each piece of food, \nand then return home is 11 steps, so your program should return the number 11. \nThe grid will always contain between 1 and 8 pieces of food. \n\nUse the Parameter Testing feature in the box below to test your code with different arguments.\n\"\"\"\n\nfrom itertools import permutations \n\ndef CharlietheDog(strArr):\n def walk(food_home, dog, matriz, steps=0):\n\n food_home_dx = food_home[0][0] - dog[0]\n food_home_dy = food_home[0][1] - dog[1]\n\n walk_x = food_home_dx/(abs(food_home_dx) if food_home_dx != 0 else 1)\n walk_y = food_home_dy/(abs(food_home_dy) if food_home_dy != 0 else 1)\n\n steps += abs(walk_x) + abs(walk_y)\n\n dog = (dog[0] + walk_x, dog[1] + walk_y)\n\n if food_home[0] == dog:\n food_home = food_home[1:]\n\n food_home_size = len(food_home)\n if food_home_size <= 0:\n return steps\n \n return walk(food_home, dog, matriz, steps)\n\n food = []\n home = None\n dog = None\n\n for i in range(len(strArr)):\n for j in range(len(strArr[i])):\n if strArr[i][j] == 'F':\n food.append((i, j))\n if strArr[i][j] == 'H':\n home = (i, j)\n if strArr[i][j] == 'C':\n dog = (i, j)\n\n foods = permutations(food)\n\n min_steps = None\n for food in foods:\n food_home = food + (home, )\n steps = walk(food_home, dog, strArr)\n if min_steps == None or steps < min_steps:\n min_steps = steps\n\n return int(min_steps)\n\n\n# keep this function call here \nprint (CharlietheDog(raw_input())) \n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from outils import Outils
class BilanComptes(object):
"""
    Class for creating the accounts report
"""
@staticmethod
def bilan(dossier_destination, subedition, subgeneraux, lignes):
"""
        creates the report
        :param dossier_destination: an instance of the dossier.DossierDestination class
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param lignes: data rows of the report
"""
nom = "bilan-subsides-comptes_" + str(subedition.annee_fin_general) + "_" + \
Outils.mois_string(subedition.mois_fin_general) + ".csv"
with dossier_destination.writer(nom) as fichier_writer:
ligne = ["année", "mois", "code client", "code client sap", "abrév. labo", "nom labo", "type client",
"nature client", "id-compte", "numéro compte", "intitulé compte", "code type compte",
"code type subside", "Subsides MAj", "Subsides MOj"]
for categorie in subgeneraux.codes_d3():
ligne.append("Subsides " + categorie + "j")
ligne += ["total Subsides"]
fichier_writer.writerow(ligne)
for ligne in lignes:
fichier_writer.writerow(ligne)
@staticmethod
def creation_lignes(subedition, subgeneraux, consolidation):
"""
        generates the data rows of the report
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param consolidation: consolidation class for the report data
        :return: data rows of the report
"""
lignes = []
for code_client, client in sorted(consolidation.clients.items()):
numbers = {}
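            # map compte id -> account number so comptes are emitted in
            # account-number order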
for id_compte, compte in client['comptes'].items():
numbers[id_compte] = compte['num_compte']
for id_compte, num_compte in sorted(numbers.items(), key=lambda x: x[1]):
compte = client['comptes'][id_compte]
if compte['subs'] > 0:
ligne = [subedition.annee_fin_general, subedition.mois_fin_general, code_client, client['sap'],
client['abrev'], client['nom'], client['type'], client['nature'], id_compte,
num_compte, compte['intitule'], compte['type'], compte['t3'],
Outils.format_2_dec(compte['s-mat']), Outils.format_2_dec(compte['s-mot'])]
for categorie in subgeneraux.codes_d3():
ligne.append(Outils.format_2_dec(compte['s-' + categorie + 't']))
ligne += [Outils.format_2_dec(compte['subs'])]
lignes.append(ligne)
return lignes
|
normal
|
{
"blob_id": "53c874fbe14031c323f83db58f17990f4e60bc58",
"index": 2195,
"step-1": "<mask token>\n\n\nclass BilanComptes(object):\n <mask token>\n <mask token>\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n for id_compte, num_compte in sorted(numbers.items(), key=lambda\n x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.\n mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'],\n client['nature'], id_compte, num_compte, compte[\n 'intitule'], compte['type'], compte['t3'], Outils.\n format_2_dec(compte['s-mat']), Outils.format_2_dec(\n compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' +\n categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-2": "<mask token>\n\n\nclass BilanComptes(object):\n <mask token>\n\n @staticmethod\n def bilan(dossier_destination, subedition, subgeneraux, lignes):\n \"\"\"\n création du bilan\n :param dossier_destination: Une instance de la classe dossier.DossierDestination\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param lignes: lignes de données du bilan\n \"\"\"\n nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general\n ) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'\n with dossier_destination.writer(nom) as fichier_writer:\n ligne = ['année', 'mois', 'code client', 'code client sap',\n 'abrév. labo', 'nom labo', 'type client', 'nature client',\n 'id-compte', 'numéro compte', 'intitulé compte',\n 'code type compte', 'code type subside', 'Subsides MAj',\n 'Subsides MOj']\n for categorie in subgeneraux.codes_d3():\n ligne.append('Subsides ' + categorie + 'j')\n ligne += ['total Subsides']\n fichier_writer.writerow(ligne)\n for ligne in lignes:\n fichier_writer.writerow(ligne)\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n for id_compte, num_compte in sorted(numbers.items(), key=lambda\n x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.\n mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'],\n client['nature'], id_compte, num_compte, compte[\n 'intitule'], compte['type'], compte['t3'], Outils.\n format_2_dec(compte['s-mat']), Outils.format_2_dec(\n compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' +\n categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-3": "<mask token>\n\n\nclass BilanComptes(object):\n \"\"\"\n Classe pour la création du bilan des comptes\n \"\"\"\n\n @staticmethod\n def bilan(dossier_destination, subedition, subgeneraux, lignes):\n \"\"\"\n création du bilan\n :param dossier_destination: Une instance de la classe dossier.DossierDestination\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param lignes: lignes de données du bilan\n \"\"\"\n nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general\n ) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'\n with dossier_destination.writer(nom) as fichier_writer:\n ligne = ['année', 'mois', 'code client', 'code client sap',\n 'abrév. labo', 'nom labo', 'type client', 'nature client',\n 'id-compte', 'numéro compte', 'intitulé compte',\n 'code type compte', 'code type subside', 'Subsides MAj',\n 'Subsides MOj']\n for categorie in subgeneraux.codes_d3():\n ligne.append('Subsides ' + categorie + 'j')\n ligne += ['total Subsides']\n fichier_writer.writerow(ligne)\n for ligne in lignes:\n fichier_writer.writerow(ligne)\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n for id_compte, num_compte in sorted(numbers.items(), key=lambda\n x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.\n mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'],\n client['nature'], id_compte, num_compte, compte[\n 'intitule'], compte['type'], compte['t3'], Outils.\n format_2_dec(compte['s-mat']), Outils.format_2_dec(\n compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' +\n categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-4": "from outils import Outils\n\n\nclass BilanComptes(object):\n \"\"\"\n Classe pour la création du bilan des comptes\n \"\"\"\n\n @staticmethod\n def bilan(dossier_destination, subedition, subgeneraux, lignes):\n \"\"\"\n création du bilan\n :param dossier_destination: Une instance de la classe dossier.DossierDestination\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param lignes: lignes de données du bilan\n \"\"\"\n nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general\n ) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'\n with dossier_destination.writer(nom) as fichier_writer:\n ligne = ['année', 'mois', 'code client', 'code client sap',\n 'abrév. labo', 'nom labo', 'type client', 'nature client',\n 'id-compte', 'numéro compte', 'intitulé compte',\n 'code type compte', 'code type subside', 'Subsides MAj',\n 'Subsides MOj']\n for categorie in subgeneraux.codes_d3():\n ligne.append('Subsides ' + categorie + 'j')\n ligne += ['total Subsides']\n fichier_writer.writerow(ligne)\n for ligne in lignes:\n fichier_writer.writerow(ligne)\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n for id_compte, num_compte in sorted(numbers.items(), key=lambda\n x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.\n mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'],\n client['nature'], id_compte, num_compte, compte[\n 'intitule'], compte['type'], compte['t3'], Outils.\n format_2_dec(compte['s-mat']), Outils.format_2_dec(\n compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' +\n categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-5": "from outils import Outils\n\n\nclass BilanComptes(object):\n \"\"\"\n Classe pour la création du bilan des comptes\n \"\"\"\n\n @staticmethod\n def bilan(dossier_destination, subedition, subgeneraux, lignes):\n \"\"\"\n création du bilan\n :param dossier_destination: Une instance de la classe dossier.DossierDestination\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param lignes: lignes de données du bilan\n \"\"\"\n nom = \"bilan-subsides-comptes_\" + str(subedition.annee_fin_general) + \"_\" + \\\n Outils.mois_string(subedition.mois_fin_general) + \".csv\"\n\n with dossier_destination.writer(nom) as fichier_writer:\n\n ligne = [\"année\", \"mois\", \"code client\", \"code client sap\", \"abrév. labo\", \"nom labo\", \"type client\",\n \"nature client\", \"id-compte\", \"numéro compte\", \"intitulé compte\", \"code type compte\",\n \"code type subside\", \"Subsides MAj\", \"Subsides MOj\"]\n for categorie in subgeneraux.codes_d3():\n ligne.append(\"Subsides \" + categorie + \"j\")\n ligne += [\"total Subsides\"]\n fichier_writer.writerow(ligne)\n\n for ligne in lignes:\n fichier_writer.writerow(ligne)\n\n @staticmethod\n def creation_lignes(subedition, subgeneraux, consolidation):\n \"\"\"\n génération des lignes de données du bilan\n :param subedition: paramètres d'édition\n :param subgeneraux: paramètres généraux\n :param consolidation: classe de consolidation des données des bilans\n :return: lignes de données du bilan\n \"\"\"\n lignes = []\n for code_client, client in sorted(consolidation.clients.items()):\n\n numbers = {}\n for id_compte, compte in client['comptes'].items():\n numbers[id_compte] = compte['num_compte']\n\n for id_compte, num_compte in sorted(numbers.items(), key=lambda x: x[1]):\n compte = client['comptes'][id_compte]\n if compte['subs'] > 0:\n ligne = [subedition.annee_fin_general, subedition.mois_fin_general, code_client, client['sap'],\n client['abrev'], client['nom'], client['type'], client['nature'], id_compte,\n num_compte, compte['intitule'], compte['type'], compte['t3'],\n Outils.format_2_dec(compte['s-mat']), Outils.format_2_dec(compte['s-mot'])]\n for categorie in subgeneraux.codes_d3():\n ligne.append(Outils.format_2_dec(compte['s-' + categorie + 't']))\n ligne += [Outils.format_2_dec(compte['subs'])]\n lignes.append(ligne)\n return lignes\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from platypush.message.event import Event
class ClipboardEvent(Event):
def __init__(self, text: str, *args, **kwargs):
super().__init__(*args, text=text, **kwargs)
# vim:sw=4:ts=4:et:
|
normal
|
{
"blob_id": "9b02ce0b3acb14bdd6463c5bdba865b28253767c",
"index": 7896,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClipboardEvent(Event):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ClipboardEvent(Event):\n\n def __init__(self, text: str, *args, **kwargs):\n super().__init__(*args, text=text, **kwargs)\n",
"step-4": "from platypush.message.event import Event\n\n\nclass ClipboardEvent(Event):\n\n def __init__(self, text: str, *args, **kwargs):\n super().__init__(*args, text=text, **kwargs)\n",
"step-5": "from platypush.message.event import Event\n\n\nclass ClipboardEvent(Event):\n def __init__(self, text: str, *args, **kwargs):\n super().__init__(*args, text=text, **kwargs)\n\n\n# vim:sw=4:ts=4:et:\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Integration to integrate Keymitt BLE devices with Home Assistant."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from microbot import MicroBotApiClient, parse_advertisement_data
from homeassistant.components import bluetooth
from homeassistant.components.bluetooth.passive_update_coordinator import (
PassiveBluetoothDataUpdateCoordinator,
)
from homeassistant.const import Platform
from homeassistant.core import HomeAssistant, callback
if TYPE_CHECKING:
from bleak.backends.device import BLEDevice
_LOGGER: logging.Logger = logging.getLogger(__package__)
PLATFORMS: list[str] = [Platform.SWITCH]
class MicroBotDataUpdateCoordinator(PassiveBluetoothDataUpdateCoordinator):
"""Class to manage fetching data from the MicroBot."""
def __init__(
self,
hass: HomeAssistant,
client: MicroBotApiClient,
ble_device: BLEDevice,
) -> None:
"""Initialize."""
self.api: MicroBotApiClient = client
self.data: dict[str, Any] = {}
self.ble_device = ble_device
super().__init__(
hass,
_LOGGER,
ble_device.address,
bluetooth.BluetoothScanningMode.ACTIVE,
)
@callback
def _async_handle_bluetooth_event(
self,
service_info: bluetooth.BluetoothServiceInfoBleak,
change: bluetooth.BluetoothChange,
) -> None:
"""Handle a Bluetooth event."""
if adv := parse_advertisement_data(
service_info.device, service_info.advertisement
):
self.data = adv.data
_LOGGER.debug("%s: MicroBot data: %s", self.ble_device.address, self.data)
self.api.update_from_advertisement(adv)
super()._async_handle_bluetooth_event(service_info, change)
|
normal
|
{
"blob_id": "5509880c30c2e03ca6eb42ad32018c39fb5939ed",
"index": 9955,
"step-1": "<mask token>\n\n\nclass MicroBotDataUpdateCoordinator(PassiveBluetoothDataUpdateCoordinator):\n <mask token>\n\n def __init__(self, hass: HomeAssistant, client: MicroBotApiClient,\n ble_device: BLEDevice) ->None:\n \"\"\"Initialize.\"\"\"\n self.api: MicroBotApiClient = client\n self.data: dict[str, Any] = {}\n self.ble_device = ble_device\n super().__init__(hass, _LOGGER, ble_device.address, bluetooth.\n BluetoothScanningMode.ACTIVE)\n\n @callback\n def _async_handle_bluetooth_event(self, service_info: bluetooth.\n BluetoothServiceInfoBleak, change: bluetooth.BluetoothChange) ->None:\n \"\"\"Handle a Bluetooth event.\"\"\"\n if (adv := parse_advertisement_data(service_info.device,\n service_info.advertisement)):\n self.data = adv.data\n _LOGGER.debug('%s: MicroBot data: %s', self.ble_device.address,\n self.data)\n self.api.update_from_advertisement(adv)\n super()._async_handle_bluetooth_event(service_info, change)\n",
"step-2": "<mask token>\n\n\nclass MicroBotDataUpdateCoordinator(PassiveBluetoothDataUpdateCoordinator):\n \"\"\"Class to manage fetching data from the MicroBot.\"\"\"\n\n def __init__(self, hass: HomeAssistant, client: MicroBotApiClient,\n ble_device: BLEDevice) ->None:\n \"\"\"Initialize.\"\"\"\n self.api: MicroBotApiClient = client\n self.data: dict[str, Any] = {}\n self.ble_device = ble_device\n super().__init__(hass, _LOGGER, ble_device.address, bluetooth.\n BluetoothScanningMode.ACTIVE)\n\n @callback\n def _async_handle_bluetooth_event(self, service_info: bluetooth.\n BluetoothServiceInfoBleak, change: bluetooth.BluetoothChange) ->None:\n \"\"\"Handle a Bluetooth event.\"\"\"\n if (adv := parse_advertisement_data(service_info.device,\n service_info.advertisement)):\n self.data = adv.data\n _LOGGER.debug('%s: MicroBot data: %s', self.ble_device.address,\n self.data)\n self.api.update_from_advertisement(adv)\n super()._async_handle_bluetooth_event(service_info, change)\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from bleak.backends.device import BLEDevice\n_LOGGER: logging.Logger = logging.getLogger(__package__)\nPLATFORMS: list[str] = [Platform.SWITCH]\n\n\nclass MicroBotDataUpdateCoordinator(PassiveBluetoothDataUpdateCoordinator):\n \"\"\"Class to manage fetching data from the MicroBot.\"\"\"\n\n def __init__(self, hass: HomeAssistant, client: MicroBotApiClient,\n ble_device: BLEDevice) ->None:\n \"\"\"Initialize.\"\"\"\n self.api: MicroBotApiClient = client\n self.data: dict[str, Any] = {}\n self.ble_device = ble_device\n super().__init__(hass, _LOGGER, ble_device.address, bluetooth.\n BluetoothScanningMode.ACTIVE)\n\n @callback\n def _async_handle_bluetooth_event(self, service_info: bluetooth.\n BluetoothServiceInfoBleak, change: bluetooth.BluetoothChange) ->None:\n \"\"\"Handle a Bluetooth event.\"\"\"\n if (adv := parse_advertisement_data(service_info.device,\n service_info.advertisement)):\n self.data = adv.data\n _LOGGER.debug('%s: MicroBot data: %s', self.ble_device.address,\n self.data)\n self.api.update_from_advertisement(adv)\n super()._async_handle_bluetooth_event(service_info, change)\n",
"step-4": "<mask token>\nfrom __future__ import annotations\nimport logging\nfrom typing import TYPE_CHECKING, Any\nfrom microbot import MicroBotApiClient, parse_advertisement_data\nfrom homeassistant.components import bluetooth\nfrom homeassistant.components.bluetooth.passive_update_coordinator import PassiveBluetoothDataUpdateCoordinator\nfrom homeassistant.const import Platform\nfrom homeassistant.core import HomeAssistant, callback\nif TYPE_CHECKING:\n from bleak.backends.device import BLEDevice\n_LOGGER: logging.Logger = logging.getLogger(__package__)\nPLATFORMS: list[str] = [Platform.SWITCH]\n\n\nclass MicroBotDataUpdateCoordinator(PassiveBluetoothDataUpdateCoordinator):\n \"\"\"Class to manage fetching data from the MicroBot.\"\"\"\n\n def __init__(self, hass: HomeAssistant, client: MicroBotApiClient,\n ble_device: BLEDevice) ->None:\n \"\"\"Initialize.\"\"\"\n self.api: MicroBotApiClient = client\n self.data: dict[str, Any] = {}\n self.ble_device = ble_device\n super().__init__(hass, _LOGGER, ble_device.address, bluetooth.\n BluetoothScanningMode.ACTIVE)\n\n @callback\n def _async_handle_bluetooth_event(self, service_info: bluetooth.\n BluetoothServiceInfoBleak, change: bluetooth.BluetoothChange) ->None:\n \"\"\"Handle a Bluetooth event.\"\"\"\n if (adv := parse_advertisement_data(service_info.device,\n service_info.advertisement)):\n self.data = adv.data\n _LOGGER.debug('%s: MicroBot data: %s', self.ble_device.address,\n self.data)\n self.api.update_from_advertisement(adv)\n super()._async_handle_bluetooth_event(service_info, change)\n",
"step-5": "\"\"\"Integration to integrate Keymitt BLE devices with Home Assistant.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom typing import TYPE_CHECKING, Any\n\nfrom microbot import MicroBotApiClient, parse_advertisement_data\n\nfrom homeassistant.components import bluetooth\nfrom homeassistant.components.bluetooth.passive_update_coordinator import (\n PassiveBluetoothDataUpdateCoordinator,\n)\nfrom homeassistant.const import Platform\nfrom homeassistant.core import HomeAssistant, callback\n\nif TYPE_CHECKING:\n from bleak.backends.device import BLEDevice\n\n_LOGGER: logging.Logger = logging.getLogger(__package__)\nPLATFORMS: list[str] = [Platform.SWITCH]\n\n\nclass MicroBotDataUpdateCoordinator(PassiveBluetoothDataUpdateCoordinator):\n \"\"\"Class to manage fetching data from the MicroBot.\"\"\"\n\n def __init__(\n self,\n hass: HomeAssistant,\n client: MicroBotApiClient,\n ble_device: BLEDevice,\n ) -> None:\n \"\"\"Initialize.\"\"\"\n self.api: MicroBotApiClient = client\n self.data: dict[str, Any] = {}\n self.ble_device = ble_device\n super().__init__(\n hass,\n _LOGGER,\n ble_device.address,\n bluetooth.BluetoothScanningMode.ACTIVE,\n )\n\n @callback\n def _async_handle_bluetooth_event(\n self,\n service_info: bluetooth.BluetoothServiceInfoBleak,\n change: bluetooth.BluetoothChange,\n ) -> None:\n \"\"\"Handle a Bluetooth event.\"\"\"\n if adv := parse_advertisement_data(\n service_info.device, service_info.advertisement\n ):\n self.data = adv.data\n _LOGGER.debug(\"%s: MicroBot data: %s\", self.ble_device.address, self.data)\n self.api.update_from_advertisement(adv)\n super()._async_handle_bluetooth_event(service_info, change)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python3
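# counts the '#' characters hit when stepping 3 columns right per grid row,
# wrapping horizontally with the modulo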
print(sum([row[lineNumber * 3 % len(row)] == '#' for lineNumber, row in enumerate(open('input.txt').read().splitlines())]))
|
normal
|
{
"blob_id": "b2fecadbd99edb89379f82a935aa1622f043eeac",
"index": 9099,
"step-1": "<mask token>\n",
"step-2": "print(sum([(row[lineNumber * 3 % len(row)] == '#') for lineNumber, row in\n enumerate(open('input.txt').read().splitlines())]))\n",
"step-3": "#!/usr/bin/env python3\n\nprint(sum([row[lineNumber * 3 % len(row)] == '#' for lineNumber, row in enumerate(open('input.txt').read().splitlines())]))",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The Manager orchestrates the overall process of running web tests.
This includes finding tests to run, reading the test expectations,
starting the required helper servers, deciding the order and way to
run the tests, retrying failed tests, and collecting the test results,
including crash logs and mismatches with expectations.
The Manager object has a constructor and one main method called run.
"""
import fnmatch
import json
import logging
import os
import random
import signal
import sys
import time
from blinkpy.common import exit_codes
from blinkpy.common.path_finder import PathFinder
from blinkpy.tool import grammar
from blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink
from blinkpy.web_tests.controllers.web_test_finder import WebTestFinder
from blinkpy.web_tests.controllers.web_test_runner import WebTestRunner
from blinkpy.web_tests.layout_package import json_results_generator
from blinkpy.web_tests.models import test_expectations
from blinkpy.web_tests.models import test_failures
from blinkpy.web_tests.models import test_run_results
from blinkpy.web_tests.models.typ_types import ResultType
from blinkpy.web_tests.models.test_input import TestInput
_log = logging.getLogger(__name__)
TestExpectations = test_expectations.TestExpectations
class Manager(object):
"""A class for managing running a series of web tests."""
HTTP_SUBDIR = 'http'
PERF_SUBDIR = 'perf'
WEBSOCKET_SUBDIR = 'websocket'
ARCHIVED_RESULTS_LIMIT = 25
def __init__(self, port, options, printer):
"""Initializes test runner data structures.
Args:
port: An object implementing platform-specific functionality.
options: An options argument which contains command line options.
printer: A Printer object to record updates to.
"""
self._port = port
self._filesystem = port.host.filesystem
self._options = options
self._printer = printer
self._expectations = None
self._http_server_started = False
self._wptserve_started = False
self._websockets_server_started = False
self._results_directory = self._port.results_directory()
self._artifacts_directory = self._port.artifacts_directory()
self._finder = WebTestFinder(self._port, self._options)
self._path_finder = PathFinder(port.host.filesystem)
self._sink = CreateTestResultSink(self._port)
self._runner = WebTestRunner(self._options, self._port, self._printer,
self._results_directory,
self._test_is_slow, self._sink)
def run(self, args):
"""Runs the tests and return a RunDetails object with the results."""
start_time = time.time()
self._printer.write_update('Collecting tests ...')
running_all_tests = False
try:
paths, all_test_names, running_all_tests = self._collect_tests(
args)
except IOError:
# This is raised if --test-list doesn't exist
return test_run_results.RunDetails(
exit_code=exit_codes.NO_TESTS_EXIT_STATUS)
test_names = self._finder.split_into_chunks(all_test_names)
if self._options.order == 'natural':
test_names.sort(key=self._port.test_key)
elif self._options.order == 'random':
test_names.sort()
random.Random(self._options.seed).shuffle(test_names)
elif self._options.order == 'none':
# Restore the test order to user specified order.
# base.tests() may change the order as it returns tests in the
# real, external/wpt, virtual order.
if paths:
test_names = self._restore_order(paths, test_names)
if not self._options.no_expectations:
self._printer.write_update('Parsing expectations ...')
self._expectations = test_expectations.TestExpectations(self._port)
tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
self._printer.print_found(
len(all_test_names), len(test_names), len(tests_to_run),
self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
if not tests_to_run:
msg = 'No tests to run.'
if self._options.zero_tests_executed_ok:
_log.info(msg)
# Keep executing to produce valid (but empty) results.
else:
_log.critical(msg)
code = exit_codes.NO_TESTS_EXIT_STATUS
return test_run_results.RunDetails(exit_code=code)
exit_code = self._set_up_run(tests_to_run)
if exit_code:
return test_run_results.RunDetails(exit_code=exit_code)
if self._options.num_retries is None:
# If --test-list is passed, or if no test narrowing is specified,
# default to 3 retries. Otherwise [e.g. if tests are being passed by
# name], default to 0 retries.
if self._options.test_list or len(paths) < len(test_names):
self._options.num_retries = 3
else:
self._options.num_retries = 0
should_retry_failures = self._options.num_retries > 0
try:
self._register_termination_handler()
self._start_servers(tests_to_run)
if self._options.watch:
run_results = self._run_test_loop(tests_to_run, tests_to_skip)
else:
run_results = self._run_test_once(tests_to_run, tests_to_skip,
should_retry_failures)
initial_results, all_retry_results = run_results
finally:
_log.info("Finally stop servers and clean up")
self._stop_servers()
self._clean_up_run()
if self._options.no_expectations:
return test_run_results.RunDetails(0, [], [], initial_results,
all_retry_results)
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
self._printer.write_update('Looking for new crash logs ...')
self._look_for_new_crash_logs(initial_results, start_time)
for retry_attempt_results in all_retry_results:
self._look_for_new_crash_logs(retry_attempt_results, start_time)
self._printer.write_update('Summarizing results ...')
summarized_full_results = test_run_results.summarize_results(
self._port, self._options, self._expectations, initial_results,
all_retry_results)
summarized_failing_results = test_run_results.summarize_results(
self._port,
self._options,
self._expectations,
initial_results,
all_retry_results,
only_include_failing=True)
run_histories = test_run_results.test_run_histories(
self._options, self._expectations, initial_results,
all_retry_results)
exit_code = summarized_failing_results['num_regressions']
if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:
_log.warning('num regressions (%d) exceeds max exit status (%d)',
exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)
exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS
if not self._options.dry_run:
self._write_json_files(summarized_full_results,
summarized_failing_results, initial_results,
running_all_tests, run_histories)
self._copy_results_html_file(self._artifacts_directory,
'results.html')
if (initial_results.interrupt_reason is
test_run_results.InterruptReason.EXTERNAL_SIGNAL):
exit_code = exit_codes.INTERRUPTED_EXIT_STATUS
else:
if initial_results.interrupted:
exit_code = exit_codes.EARLY_EXIT_STATUS
if (self._options.show_results
and (exit_code or initial_results.total_failures)):
self._port.show_results_html_file(
self._filesystem.join(self._artifacts_directory,
'results.html'))
self._printer.print_results(time.time() - start_time,
initial_results)
return test_run_results.RunDetails(exit_code, summarized_full_results,
summarized_failing_results,
initial_results, all_retry_results)
def _register_termination_handler(self):
if self._port.host.platform.is_win():
signum = signal.SIGBREAK
else:
signum = signal.SIGTERM
signal.signal(signum, self._on_termination)
def _on_termination(self, signum, _frame):
self._printer.write_update(
'Received signal "%s" (%d) in %d' %
(signal.strsignal(signum), signum, os.getpid()))
raise KeyboardInterrupt
def _run_test_loop(self, tests_to_run, tests_to_skip):
# Don't show results in a new browser window because we're already
# printing the link to diffs in the loop
self._options.show_results = False
while True:
initial_results, all_retry_results = self._run_test_once(
tests_to_run, tests_to_skip, should_retry_failures=False)
for name in initial_results.failures_by_name:
failure = initial_results.failures_by_name[name][0]
if isinstance(failure, test_failures.FailureTextMismatch):
full_test_path = self._filesystem.join(
self._artifacts_directory, name)
filename, _ = self._filesystem.splitext(full_test_path)
pretty_diff_path = 'file://' + filename + '-pretty-diff.html'
self._printer.writeln('Link to pretty diff:')
self._printer.writeln(pretty_diff_path + '\n')
self._printer.writeln('Finished running tests')
user_input = self._port.host.user.prompt(
'Interactive watch mode: (q)uit (r)etry\n').lower()
if user_input == 'q' or user_input == 'quit':
return (initial_results, all_retry_results)
def _run_test_once(self, tests_to_run, tests_to_skip,
should_retry_failures):
num_workers = int(
self._port.num_workers(int(self._options.child_processes)))
initial_results = self._run_tests(
tests_to_run, tests_to_skip, self._options.repeat_each,
self._options.iterations, num_workers)
# Don't retry failures when interrupted by user or failures limit exception.
should_retry_failures = (should_retry_failures
and not initial_results.interrupted)
tests_to_retry = self._tests_to_retry(initial_results)
all_retry_results = []
if should_retry_failures and tests_to_retry:
for retry_attempt in range(1, self._options.num_retries + 1):
if not tests_to_retry:
break
_log.info('')
_log.info(
'Retrying %s, attempt %d of %d...',
grammar.pluralize('unexpected failure',
len(tests_to_retry)), retry_attempt,
self._options.num_retries)
retry_results = self._run_tests(
tests_to_retry,
tests_to_skip=set(),
repeat_each=1,
iterations=1,
num_workers=num_workers,
retry_attempt=retry_attempt)
all_retry_results.append(retry_results)
tests_to_retry = self._tests_to_retry(retry_results)
return (initial_results, all_retry_results)
def _restore_order(self, paths, test_names):
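        # reorder tests to follow the user-specified path order (prefix or
        # glob match); tests matched by no path are appended at the end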
original_test_names = list(test_names)
test_names = []
for path in paths:
for test in original_test_names:
if test.startswith(path) or fnmatch.fnmatch(test, path):
test_names.append(test)
test_names += list(set(original_test_names) - set(test_names))
return test_names
def _collect_tests(self, args):
return self._finder.find_tests(
args,
test_lists=self._options.test_list,
filter_files=self._options.isolated_script_test_filter_file,
fastest_percentile=self._options.fastest,
filters=self._options.isolated_script_test_filter)
def _is_http_test(self, test):
return (
test.startswith(self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR)
or self._is_websocket_test(test) or self._port.TEST_PATH_SEPARATOR
+ self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR in test)
def _is_websocket_test(self, test):
if self._port.should_use_wptserve(test):
return False
return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test
def _http_tests(self, test_names):
return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return (self.PERF_SUBDIR == test
or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test)
def _prepare_lists(self, paths, test_names):
tests_to_skip = self._finder.skip_tests(paths, test_names,
self._expectations)
tests_to_run = [
test for test in test_names if test not in tests_to_skip
]
return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file, retry_attempt):
return TestInput(
test_file,
self._options.slow_timeout_ms
if self._test_is_slow(test_file) else self._options.timeout_ms,
self._test_requires_lock(test_file),
retry_attempt=retry_attempt)
def _test_requires_lock(self, test_file):
"""Returns True if the test needs to be locked when running multiple
instances of this test runner.
Perf tests are locked because heavy load caused by running other
tests in parallel might cause some of them to time out.
"""
return self._is_perf_test(test_file)
def _test_is_slow(self, test_file):
if not self._expectations:
return False
is_slow_test = self._expectations.get_expectations(
test_file).is_slow_test
return is_slow_test or self._port.is_slow_wpt_test(test_file)
def _needs_servers(self, test_names):
return any(
self._is_http_test(test_name) for test_name in test_names)
def _set_up_run(self, test_names):
self._printer.write_update('Checking build ...')
if self._options.build:
exit_code = self._port.check_build(
self._needs_servers(test_names), self._printer)
if exit_code:
_log.error('Build check failed')
return exit_code
if self._options.clobber_old_results:
self._port.clobber_old_results()
elif self._filesystem.exists(self._artifacts_directory):
self._port.limit_archived_results_count()
# Rename the existing results folder for archiving.
self._port.rename_results_folder()
# Create the output directory if it doesn't already exist.
self._port.host.filesystem.maybe_make_directory(
self._artifacts_directory)
exit_code = self._port.setup_test_run()
if exit_code:
_log.error('Build setup failed')
return exit_code
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update('Checking system dependencies ...')
exit_code = self._port.check_sys_deps()
if exit_code:
return exit_code
return exit_codes.OK_EXIT_STATUS
def _run_tests(self,
tests_to_run,
tests_to_skip,
repeat_each,
iterations,
num_workers,
retry_attempt=0):
test_inputs = []
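        # expand each test into iterations * repeat_each TestInput entries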
for _ in range(iterations):
for test in tests_to_run:
for _ in range(repeat_each):
test_inputs.append(
self._test_input_for_file(test, retry_attempt))
return self._runner.run_tests(self._expectations, test_inputs,
tests_to_skip, num_workers,
retry_attempt)
def _start_servers(self, tests_to_run):
if any(self._port.is_wpt_test(test) for test in tests_to_run):
self._printer.write_update('Starting WPTServe ...')
self._port.start_wptserve()
self._wptserve_started = True
if (self._port.requires_http_server()
or any(self._is_http_test(test) for test in tests_to_run)):
self._printer.write_update('Starting HTTP server ...')
self._port.start_http_server(
additional_dirs={},
number_of_drivers=self._options.max_locked_shards)
self._http_server_started = True
if any(self._is_websocket_test(test) for test in tests_to_run):
self._printer.write_update('Starting WebSocket server ...')
self._port.start_websocket_server()
self._websockets_server_started = True
def _stop_servers(self):
if self._wptserve_started:
self._printer.write_update('Stopping WPTServe ...')
self._wptserve_started = False
self._port.stop_wptserve()
if self._http_server_started:
self._printer.write_update('Stopping HTTP server ...')
self._http_server_started = False
self._port.stop_http_server()
if self._websockets_server_started:
self._printer.write_update('Stopping WebSocket server ...')
self._websockets_server_started = False
self._port.stop_websocket_server()
def _clean_up_run(self):
_log.debug('Flushing stdout')
sys.stdout.flush()
_log.debug('Flushing stderr')
sys.stderr.flush()
_log.debug('Cleaning up port')
self._port.clean_up_test_run()
if self._sink:
_log.debug('Closing sink')
self._sink.close()

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Looks for and writes new crash logs, at the end of the test run.

        Since crash logs can take a long time to be written out if the system is
        under stress, do a second pass at the end of the test run.

        Args:
            run_results: The results of the test run.
            start_time: Time the tests started at. We're looking for crash
                logs after that time.
        """
crashed_processes = []
test_to_crash_failure = {}
# reset static variables for Failure type classes
test_failures.AbstractTestResultType.port = self._port
test_failures.AbstractTestResultType.result_directory = self._results_directory
test_failures.AbstractTestResultType.filesystem = self._filesystem
for test, result in run_results.unexpected_results_by_name.items():
if result.type != ResultType.Crash:
continue
for failure in result.failures:
if (not isinstance(failure, test_failures.FailureCrash)
or failure.has_log):
continue
crashed_processes.append(
[test, failure.process_name, failure.pid])
test_to_crash_failure[test] = failure
sample_files = self._port.look_for_new_samples(crashed_processes,
start_time) or {}
for test, sample_file in sample_files.items():
test_failures.AbstractTestResultType.test_name = test
test_result = run_results.unexpected_results_by_name[test]
artifact_relative_path = self._port.output_filename(
test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')
artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()
artifact_abspath = self._filesystem.join(self._results_directory,
artifacts_sub_dir,
artifact_relative_path)
self._filesystem.maybe_make_directory(
self._filesystem.dirname(artifact_abspath))
self._filesystem.copyfile(sample_file, artifact_abspath)
test_result.artifacts.AddArtifact(
'sample_file',
self._filesystem.join(artifacts_sub_dir,
artifact_relative_path))
new_crash_logs = self._port.look_for_new_crash_logs(
crashed_processes, start_time) or {}
        for test, (crash_log, crash_site) in new_crash_logs.items():
            test_failures.AbstractTestResultType.test_name = test
            # Look up the failure recorded for this test; the bare `failure`
            # name would otherwise be a stale leftover from the loop above.
            failure = test_to_crash_failure[test]
            failure.crash_log = crash_log
            failure.has_log = self._port.output_contains_sanitizer_messages(
                failure.crash_log)
            test_result = run_results.unexpected_results_by_name[test]
            test_result.crash_site = crash_site
            failure.create_artifacts(
                test_result.artifacts, force_overwrite=True)
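
    # Shapes of the port hooks as consumed above (inferred from this method,
    # not from the port implementations):
    #
    #   self._port.look_for_new_samples(processes, start_time)
    #       -> {test_name: sample_file_path} or None
    #   self._port.look_for_new_crash_logs(processes, start_time)
    #       -> {test_name: (crash_log, crash_site)} or None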

    def _tests_to_retry(self, run_results):
# TODO(ojan): This should also check that result.type != test_expectations.MISSING
# since retrying missing expectations is silly. But that's a bit tricky since we
# only consider the last retry attempt for the count of unexpected regressions.
return [
result.test_name
for result in run_results.unexpected_results_by_name.values()
if result.type != ResultType.Pass
]
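
    # Example: if unexpected_results_by_name maps 'a.html' to a Crash result
    # and 'b.html' to an unexpected Pass, only 'a.html' is retried.
    # (Test names are illustrative.)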

    def _write_json_files(self, summarized_full_results,
summarized_failing_results, initial_results,
running_all_tests, run_histories):
_log.debug("Writing JSON files in %s.", self._artifacts_directory)
# FIXME: Upload stats.json to the server and delete times_ms.
times_trie = json_results_generator.test_timings_trie(
initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._artifacts_directory,
'times_ms.json')
json_results_generator.write_json(self._filesystem, times_trie,
times_json_path)
# Save out the times data so we can use it for --fastest in the future.
if running_all_tests:
bot_test_times_path = self._port.bot_test_times_path()
self._filesystem.maybe_make_directory(
self._filesystem.dirname(bot_test_times_path))
json_results_generator.write_json(self._filesystem, times_trie,
bot_test_times_path)
stats_trie = self._stats_trie(initial_results)
stats_path = self._filesystem.join(self._artifacts_directory,
'stats.json')
self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
full_results_path = self._filesystem.join(self._artifacts_directory,
'full_results.json')
json_results_generator.write_json(
self._filesystem, summarized_full_results, full_results_path)
full_results_jsonp_path = self._filesystem.join(
self._artifacts_directory, 'full_results_jsonp.js')
json_results_generator.write_json(
self._filesystem,
summarized_full_results,
full_results_jsonp_path,
callback='ADD_FULL_RESULTS')
failing_results_path = self._filesystem.join(self._artifacts_directory,
'failing_results.json')
# We write failing_results.json out as jsonp because we need to load it
# from a file url for results.html and Chromium doesn't allow that.
json_results_generator.write_json(
self._filesystem,
summarized_failing_results,
failing_results_path,
callback='ADD_RESULTS')
if self._options.json_test_results:
json_results_generator.write_json(self._filesystem,
summarized_full_results,
self._options.json_test_results)
if self._options.write_run_histories_to:
json_results_generator.write_json(
self._filesystem, run_histories,
self._options.write_run_histories_to)
_log.debug('Finished writing JSON files.')
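
    # Assuming json_results_generator.write_json wraps the payload in the
    # given callback, the JSONP files written above look roughly like:
    #
    #   ADD_FULL_RESULTS({...});   # full_results_jsonp.js
    #   ADD_RESULTS({...});        # failing_results.json
    #
    # which lets results.html pull them in via <script> tags from file://
    # URLs, where loading plain local JSON is blocked.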

    def _copy_results_html_file(self, destination_dir, filename):
        """Copies a file from the template directory to the results directory."""
        files_to_copy = [filename, filename + ".version"]
        template_dir = self._path_finder.path_from_blink_tools(
            'blinkpy', 'web_tests')
        for name in files_to_copy:
            source_path = self._filesystem.join(template_dir, name)
            destination_path = self._filesystem.join(destination_dir, name)
            # Note that the results.html template file won't exist when
            # we're using a MockFileSystem during unit tests, so make sure
            # it exists before we try to copy it.
            if self._filesystem.exists(source_path):
                self._filesystem.copyfile(source_path, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
for result in initial_results.results_by_name.values():
if result.type != ResultType.Skip:
stats[result.test_name] = {
'results': (_worker_number(result.worker_name),
result.test_number, result.pid,
int(result.test_run_time * 1000),
int(result.total_run_time * 1000))
}
stats_trie = {}
for name, value in stats.items():
json_results_generator.add_path_to_trie(name, value, stats_trie)
return stats_trie
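
    # Assuming add_path_to_trie splits test names on '/', the trie nests one
    # dict per path component (illustrative):
    #
    #   add_path_to_trie('fast/dom/a.html', {'results': (...)}, trie)
    #   # trie == {'fast': {'dom': {'a.html': {'results': (...)}}}}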
|
normal
|
{
"blob_id": "08b57c00beb8dfedfee1bc032b8c281d7a151931",
"index": 8033,
"step-1": "<mask token>\n\n\nclass Manager(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = options\n self._printer = printer\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self.\n _printer, self._results_directory, self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(args\n )\n except IOError:\n return test_run_results.RunDetails(exit_code=exit_codes.\n NO_TESTS_EXIT_STATUS)\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n if paths:\n test_names = self._restore_order(paths, test_names)\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n self._printer.print_found(len(all_test_names), len(test_names), len\n (tests_to_run), self._options.repeat_each, self._options.iterations\n )\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n if self._options.num_retries is None:\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n should_retry_failures = self._options.num_retries > 0\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run,\n tests_to_skip, should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info('Finally stop servers and clean up')\n self._stop_servers()\n self._clean_up_run()\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n 
self._look_for_new_crash_logs(retry_attempt_results, start_time)\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(self.\n _port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(self\n ._port, self._options, self._expectations, initial_results,\n all_retry_results, only_include_failing=True)\n run_histories = test_run_results.test_run_histories(self._options,\n self._expectations, initial_results, all_retry_results)\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is test_run_results.\n InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if self._options.show_results and (exit_code or\n initial_results.total_failures):\n self._port.show_results_html_file(self._filesystem.join\n (self._artifacts_directory, 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n return test_run_results.RunDetails(exit_code,\n summarized_full_results, summarized_failing_results,\n initial_results, all_retry_results)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = []\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n <mask token>\n\n def _is_http_test(self, test):\n return (test.startswith(self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.\n _port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names, self.\n _expectations)\n tests_to_run = [test for test in test_names if test not in\n tests_to_skip]\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return TestInput(test_file, self._options.slow_timeout_ms if self.\n _test_is_slow(test_file) else self._options.timeout_ms, self.\n _test_requires_lock(test_file), retry_attempt=retry_attempt)\n <mask token>\n <mask token>\n\n def _needs_servers(self, test_names):\n return any(self._is_http_test(test_name) for test_name in test_names)\n <mask token>\n\n def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,\n 
iterations, num_workers, retry_attempt=0):\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n test_inputs.append(self._test_input_for_file(test,\n retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers, retry_attempt)\n <mask token>\n <mask token>\n <mask token>\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results of the test run.\n start_time: Time the tests started at. We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = (self.\n _results_directory)\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if not isinstance(failure, test_failures.FailureCrash\n ) or failure.has_log:\n continue\n crashed_processes.append([test, failure.process_name,\n failure.pid])\n test_to_crash_failure[test] = failure\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(test,\n test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self.\n _results_directory, artifacts_sub_dir, artifact_relative_path)\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact('sample_file', self.\n _filesystem.join(artifacts_sub_dir, artifact_relative_path))\n new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,\n start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n test_to_crash_failure[test].create_artifacts(test_result.\n artifacts, force_overwrite=True)\n <mask token>\n\n def _write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results, running_all_tests,\n run_histories):\n _log.debug('Writing JSON files in %s.', self._artifacts_directory)\n times_trie = json_results_generator.test_timings_trie(initial_results\n .results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n stats_trie = 
self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_path)\n full_results_jsonp_path = self._filesystem.join(self.\n _artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_jsonp_path, callback=\n 'ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self.\n _artifacts_directory, 'failing_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_failing_results, failing_results_path, callback=\n 'ADD_RESULTS')\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, self._options.json_test_results)\n if self._options.write_run_histories_to:\n json_results_generator.write_json(self._filesystem,\n run_histories, self._options.write_run_histories_to)\n _log.debug('Finished writing JSON files.')\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Manager(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = options\n self._printer = printer\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self.\n _printer, self._results_directory, self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(args\n )\n except IOError:\n return test_run_results.RunDetails(exit_code=exit_codes.\n NO_TESTS_EXIT_STATUS)\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n if paths:\n test_names = self._restore_order(paths, test_names)\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n self._printer.print_found(len(all_test_names), len(test_names), len\n (tests_to_run), self._options.repeat_each, self._options.iterations\n )\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n if self._options.num_retries is None:\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n should_retry_failures = self._options.num_retries > 0\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run,\n tests_to_skip, should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info('Finally stop servers and clean up')\n self._stop_servers()\n self._clean_up_run()\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n 
self._look_for_new_crash_logs(retry_attempt_results, start_time)\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(self.\n _port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(self\n ._port, self._options, self._expectations, initial_results,\n all_retry_results, only_include_failing=True)\n run_histories = test_run_results.test_run_histories(self._options,\n self._expectations, initial_results, all_retry_results)\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is test_run_results.\n InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if self._options.show_results and (exit_code or\n initial_results.total_failures):\n self._port.show_results_html_file(self._filesystem.join\n (self._artifacts_directory, 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n return test_run_results.RunDetails(exit_code,\n summarized_full_results, summarized_failing_results,\n initial_results, all_retry_results)\n <mask token>\n <mask token>\n\n def _run_test_loop(self, tests_to_run, tests_to_skip):\n self._options.show_results = False\n while True:\n initial_results, all_retry_results = self._run_test_once(\n tests_to_run, tests_to_skip, should_retry_failures=False)\n for name in initial_results.failures_by_name:\n failure = initial_results.failures_by_name[name][0]\n if isinstance(failure, test_failures.FailureTextMismatch):\n full_test_path = self._filesystem.join(self.\n _artifacts_directory, name)\n filename, _ = self._filesystem.splitext(full_test_path)\n pretty_diff_path = ('file://' + filename +\n '-pretty-diff.html')\n self._printer.writeln('Link to pretty diff:')\n self._printer.writeln(pretty_diff_path + '\\n')\n self._printer.writeln('Finished running tests')\n user_input = self._port.host.user.prompt(\n 'Interactive watch mode: (q)uit (r)etry\\n').lower()\n if user_input == 'q' or user_input == 'quit':\n return initial_results, all_retry_results\n\n def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures\n ):\n num_workers = int(self._port.num_workers(int(self._options.\n child_processes)))\n initial_results = self._run_tests(tests_to_run, tests_to_skip, self\n ._options.repeat_each, self._options.iterations, num_workers)\n should_retry_failures = (should_retry_failures and not\n initial_results.interrupted)\n tests_to_retry = self._tests_to_retry(initial_results)\n all_retry_results = []\n if should_retry_failures and tests_to_retry:\n for retry_attempt in range(1, self._options.num_retries + 1):\n if not tests_to_retry:\n break\n _log.info('')\n _log.info('Retrying %s, attempt %d of %d...', grammar.\n pluralize('unexpected failure', len(tests_to_retry)),\n retry_attempt, self._options.num_retries)\n retry_results = self._run_tests(tests_to_retry,\n 
tests_to_skip=set(), repeat_each=1, iterations=1,\n num_workers=num_workers, retry_attempt=retry_attempt)\n all_retry_results.append(retry_results)\n tests_to_retry = self._tests_to_retry(retry_results)\n return initial_results, all_retry_results\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = []\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n <mask token>\n\n def _is_http_test(self, test):\n return (test.startswith(self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.\n _port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names, self.\n _expectations)\n tests_to_run = [test for test in test_names if test not in\n tests_to_skip]\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return TestInput(test_file, self._options.slow_timeout_ms if self.\n _test_is_slow(test_file) else self._options.timeout_ms, self.\n _test_requires_lock(test_file), retry_attempt=retry_attempt)\n\n def _test_requires_lock(self, test_file):\n \"\"\"Returns True if the test needs to be locked when running multiple\n instances of this test runner.\n\n Perf tests are locked because heavy load caused by running other\n tests in parallel might cause some of them to time out.\n \"\"\"\n return self._is_perf_test(test_file)\n\n def _test_is_slow(self, test_file):\n if not self._expectations:\n return False\n is_slow_test = self._expectations.get_expectations(test_file\n ).is_slow_test\n return is_slow_test or self._port.is_slow_wpt_test(test_file)\n\n def _needs_servers(self, test_names):\n return any(self._is_http_test(test_name) for test_name in test_names)\n <mask token>\n\n def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,\n iterations, num_workers, retry_attempt=0):\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n test_inputs.append(self._test_input_for_file(test,\n retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers, retry_attempt)\n <mask token>\n <mask token>\n <mask token>\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results of the test run.\n start_time: Time the tests started at. 
We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = (self.\n _results_directory)\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if not isinstance(failure, test_failures.FailureCrash\n ) or failure.has_log:\n continue\n crashed_processes.append([test, failure.process_name,\n failure.pid])\n test_to_crash_failure[test] = failure\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(test,\n test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self.\n _results_directory, artifacts_sub_dir, artifact_relative_path)\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact('sample_file', self.\n _filesystem.join(artifacts_sub_dir, artifact_relative_path))\n new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,\n start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n test_to_crash_failure[test].create_artifacts(test_result.\n artifacts, force_overwrite=True)\n <mask token>\n\n def _write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results, running_all_tests,\n run_histories):\n _log.debug('Writing JSON files in %s.', self._artifacts_directory)\n times_trie = json_results_generator.test_timings_trie(initial_results\n .results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n stats_trie = self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_path)\n full_results_jsonp_path = self._filesystem.join(self.\n _artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_jsonp_path, callback=\n 'ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self.\n _artifacts_directory, 'failing_results.json')\n 
json_results_generator.write_json(self._filesystem,\n summarized_failing_results, failing_results_path, callback=\n 'ADD_RESULTS')\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, self._options.json_test_results)\n if self._options.write_run_histories_to:\n json_results_generator.write_json(self._filesystem,\n run_histories, self._options.write_run_histories_to)\n _log.debug('Finished writing JSON files.')\n\n def _copy_results_html_file(self, destination_dir, filename):\n \"\"\"Copies a file from the template directory to the results directory.\"\"\"\n files_to_copy = [filename, filename + '.version']\n template_dir = self._path_finder.path_from_blink_tools('blinkpy',\n 'web_tests')\n for filename in files_to_copy:\n source_path = self._filesystem.join(template_dir, filename)\n destination_path = self._filesystem.join(destination_dir, filename)\n if self._filesystem.exists(source_path):\n self._filesystem.copyfile(source_path, destination_path)\n\n def _stats_trie(self, initial_results):\n\n def _worker_number(worker_name):\n return int(worker_name.split('/')[1]) if worker_name else -1\n stats = {}\n for result in initial_results.results_by_name.values():\n if result.type != ResultType.Skip:\n stats[result.test_name] = {'results': (_worker_number(\n result.worker_name), result.test_number, result.pid,\n int(result.test_run_time * 1000), int(result.\n total_run_time * 1000))}\n stats_trie = {}\n for name, value in stats.items():\n json_results_generator.add_path_to_trie(name, value, stats_trie)\n return stats_trie\n",
"step-3": "<mask token>\n\n\nclass Manager(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = options\n self._printer = printer\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self.\n _printer, self._results_directory, self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(args\n )\n except IOError:\n return test_run_results.RunDetails(exit_code=exit_codes.\n NO_TESTS_EXIT_STATUS)\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n if paths:\n test_names = self._restore_order(paths, test_names)\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n self._printer.print_found(len(all_test_names), len(test_names), len\n (tests_to_run), self._options.repeat_each, self._options.iterations\n )\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n if self._options.num_retries is None:\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n should_retry_failures = self._options.num_retries > 0\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run,\n tests_to_skip, should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info('Finally stop servers and clean up')\n self._stop_servers()\n self._clean_up_run()\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n 
self._look_for_new_crash_logs(retry_attempt_results, start_time)\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(self.\n _port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(self\n ._port, self._options, self._expectations, initial_results,\n all_retry_results, only_include_failing=True)\n run_histories = test_run_results.test_run_histories(self._options,\n self._expectations, initial_results, all_retry_results)\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is test_run_results.\n InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if self._options.show_results and (exit_code or\n initial_results.total_failures):\n self._port.show_results_html_file(self._filesystem.join\n (self._artifacts_directory, 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n return test_run_results.RunDetails(exit_code,\n summarized_full_results, summarized_failing_results,\n initial_results, all_retry_results)\n <mask token>\n\n def _on_termination(self, signum, _frame):\n self._printer.write_update('Received signal \"%s\" (%d) in %d' % (\n signal.strsignal(signum), signum, os.getpid()))\n raise KeyboardInterrupt\n\n def _run_test_loop(self, tests_to_run, tests_to_skip):\n self._options.show_results = False\n while True:\n initial_results, all_retry_results = self._run_test_once(\n tests_to_run, tests_to_skip, should_retry_failures=False)\n for name in initial_results.failures_by_name:\n failure = initial_results.failures_by_name[name][0]\n if isinstance(failure, test_failures.FailureTextMismatch):\n full_test_path = self._filesystem.join(self.\n _artifacts_directory, name)\n filename, _ = self._filesystem.splitext(full_test_path)\n pretty_diff_path = ('file://' + filename +\n '-pretty-diff.html')\n self._printer.writeln('Link to pretty diff:')\n self._printer.writeln(pretty_diff_path + '\\n')\n self._printer.writeln('Finished running tests')\n user_input = self._port.host.user.prompt(\n 'Interactive watch mode: (q)uit (r)etry\\n').lower()\n if user_input == 'q' or user_input == 'quit':\n return initial_results, all_retry_results\n\n def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures\n ):\n num_workers = int(self._port.num_workers(int(self._options.\n child_processes)))\n initial_results = self._run_tests(tests_to_run, tests_to_skip, self\n ._options.repeat_each, self._options.iterations, num_workers)\n should_retry_failures = (should_retry_failures and not\n initial_results.interrupted)\n tests_to_retry = self._tests_to_retry(initial_results)\n all_retry_results = []\n if should_retry_failures and tests_to_retry:\n for retry_attempt in range(1, self._options.num_retries + 1):\n if not tests_to_retry:\n break\n _log.info('')\n _log.info('Retrying %s, attempt %d of 
%d...', grammar.\n pluralize('unexpected failure', len(tests_to_retry)),\n retry_attempt, self._options.num_retries)\n retry_results = self._run_tests(tests_to_retry,\n tests_to_skip=set(), repeat_each=1, iterations=1,\n num_workers=num_workers, retry_attempt=retry_attempt)\n all_retry_results.append(retry_results)\n tests_to_retry = self._tests_to_retry(retry_results)\n return initial_results, all_retry_results\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = []\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n <mask token>\n\n def _is_http_test(self, test):\n return (test.startswith(self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.\n _port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names, self.\n _expectations)\n tests_to_run = [test for test in test_names if test not in\n tests_to_skip]\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return TestInput(test_file, self._options.slow_timeout_ms if self.\n _test_is_slow(test_file) else self._options.timeout_ms, self.\n _test_requires_lock(test_file), retry_attempt=retry_attempt)\n\n def _test_requires_lock(self, test_file):\n \"\"\"Returns True if the test needs to be locked when running multiple\n instances of this test runner.\n\n Perf tests are locked because heavy load caused by running other\n tests in parallel might cause some of them to time out.\n \"\"\"\n return self._is_perf_test(test_file)\n\n def _test_is_slow(self, test_file):\n if not self._expectations:\n return False\n is_slow_test = self._expectations.get_expectations(test_file\n ).is_slow_test\n return is_slow_test or self._port.is_slow_wpt_test(test_file)\n\n def _needs_servers(self, test_names):\n return any(self._is_http_test(test_name) for test_name in test_names)\n <mask token>\n\n def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,\n iterations, num_workers, retry_attempt=0):\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n test_inputs.append(self._test_input_for_file(test,\n retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers, retry_attempt)\n\n def _start_servers(self, tests_to_run):\n if any(self._port.is_wpt_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WPTServe ...')\n self._port.start_wptserve()\n self._wptserve_started = True\n if self._port.requires_http_server() or any(self._is_http_test(test\n ) for test in tests_to_run):\n self._printer.write_update('Starting HTTP server ...')\n self._port.start_http_server(additional_dirs={},\n number_of_drivers=self._options.max_locked_shards)\n self._http_server_started = True\n if 
any(self._is_websocket_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WebSocket server ...')\n self._port.start_websocket_server()\n self._websockets_server_started = True\n\n def _stop_servers(self):\n if self._wptserve_started:\n self._printer.write_update('Stopping WPTServe ...')\n self._wptserve_started = False\n self._port.stop_wptserve()\n if self._http_server_started:\n self._printer.write_update('Stopping HTTP server ...')\n self._http_server_started = False\n self._port.stop_http_server()\n if self._websockets_server_started:\n self._printer.write_update('Stopping WebSocket server ...')\n self._websockets_server_started = False\n self._port.stop_websocket_server()\n\n def _clean_up_run(self):\n _log.debug('Flushing stdout')\n sys.stdout.flush()\n _log.debug('Flushing stderr')\n sys.stderr.flush()\n _log.debug('Cleaning up port')\n self._port.clean_up_test_run()\n if self._sink:\n _log.debug('Closing sink')\n self._sink.close()\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results of the test run.\n start_time: Time the tests started at. We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = (self.\n _results_directory)\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if not isinstance(failure, test_failures.FailureCrash\n ) or failure.has_log:\n continue\n crashed_processes.append([test, failure.process_name,\n failure.pid])\n test_to_crash_failure[test] = failure\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(test,\n test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self.\n _results_directory, artifacts_sub_dir, artifact_relative_path)\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact('sample_file', self.\n _filesystem.join(artifacts_sub_dir, artifact_relative_path))\n new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,\n start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n test_to_crash_failure[test].create_artifacts(test_result.\n artifacts, force_overwrite=True)\n\n def _tests_to_retry(self, run_results):\n return [result.test_name for result in run_results.\n unexpected_results_by_name.values() if result.type !=\n ResultType.Pass]\n\n def 
_write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results, running_all_tests,\n run_histories):\n _log.debug('Writing JSON files in %s.', self._artifacts_directory)\n times_trie = json_results_generator.test_timings_trie(initial_results\n .results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n stats_trie = self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_path)\n full_results_jsonp_path = self._filesystem.join(self.\n _artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_jsonp_path, callback=\n 'ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self.\n _artifacts_directory, 'failing_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_failing_results, failing_results_path, callback=\n 'ADD_RESULTS')\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, self._options.json_test_results)\n if self._options.write_run_histories_to:\n json_results_generator.write_json(self._filesystem,\n run_histories, self._options.write_run_histories_to)\n _log.debug('Finished writing JSON files.')\n\n def _copy_results_html_file(self, destination_dir, filename):\n \"\"\"Copies a file from the template directory to the results directory.\"\"\"\n files_to_copy = [filename, filename + '.version']\n template_dir = self._path_finder.path_from_blink_tools('blinkpy',\n 'web_tests')\n for filename in files_to_copy:\n source_path = self._filesystem.join(template_dir, filename)\n destination_path = self._filesystem.join(destination_dir, filename)\n if self._filesystem.exists(source_path):\n self._filesystem.copyfile(source_path, destination_path)\n\n def _stats_trie(self, initial_results):\n\n def _worker_number(worker_name):\n return int(worker_name.split('/')[1]) if worker_name else -1\n stats = {}\n for result in initial_results.results_by_name.values():\n if result.type != ResultType.Skip:\n stats[result.test_name] = {'results': (_worker_number(\n result.worker_name), result.test_number, result.pid,\n int(result.test_run_time * 1000), int(result.\n total_run_time * 1000))}\n stats_trie = {}\n for name, value in stats.items():\n json_results_generator.add_path_to_trie(name, value, stats_trie)\n return stats_trie\n",
"step-4": "<mask token>\n_log = logging.getLogger(__name__)\nTestExpectations = test_expectations.TestExpectations\n\n\nclass Manager(object):\n \"\"\"A class for managing running a series of web tests.\"\"\"\n HTTP_SUBDIR = 'http'\n PERF_SUBDIR = 'perf'\n WEBSOCKET_SUBDIR = 'websocket'\n ARCHIVED_RESULTS_LIMIT = 25\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = options\n self._printer = printer\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self.\n _printer, self._results_directory, self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(args\n )\n except IOError:\n return test_run_results.RunDetails(exit_code=exit_codes.\n NO_TESTS_EXIT_STATUS)\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n if paths:\n test_names = self._restore_order(paths, test_names)\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n self._printer.print_found(len(all_test_names), len(test_names), len\n (tests_to_run), self._options.repeat_each, self._options.iterations\n )\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n if self._options.num_retries is None:\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n should_retry_failures = self._options.num_retries > 0\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run,\n tests_to_skip, should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info('Finally stop servers and clean up')\n self._stop_servers()\n self._clean_up_run()\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n 
all_retry_results)\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(self.\n _port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(self\n ._port, self._options, self._expectations, initial_results,\n all_retry_results, only_include_failing=True)\n run_histories = test_run_results.test_run_histories(self._options,\n self._expectations, initial_results, all_retry_results)\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is test_run_results.\n InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if self._options.show_results and (exit_code or\n initial_results.total_failures):\n self._port.show_results_html_file(self._filesystem.join\n (self._artifacts_directory, 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n return test_run_results.RunDetails(exit_code,\n summarized_full_results, summarized_failing_results,\n initial_results, all_retry_results)\n\n def _register_termination_handler(self):\n if self._port.host.platform.is_win():\n signum = signal.SIGBREAK\n else:\n signum = signal.SIGTERM\n signal.signal(signum, self._on_termination)\n\n def _on_termination(self, signum, _frame):\n self._printer.write_update('Received signal \"%s\" (%d) in %d' % (\n signal.strsignal(signum), signum, os.getpid()))\n raise KeyboardInterrupt\n\n def _run_test_loop(self, tests_to_run, tests_to_skip):\n self._options.show_results = False\n while True:\n initial_results, all_retry_results = self._run_test_once(\n tests_to_run, tests_to_skip, should_retry_failures=False)\n for name in initial_results.failures_by_name:\n failure = initial_results.failures_by_name[name][0]\n if isinstance(failure, test_failures.FailureTextMismatch):\n full_test_path = self._filesystem.join(self.\n _artifacts_directory, name)\n filename, _ = self._filesystem.splitext(full_test_path)\n pretty_diff_path = ('file://' + filename +\n '-pretty-diff.html')\n self._printer.writeln('Link to pretty diff:')\n self._printer.writeln(pretty_diff_path + '\\n')\n self._printer.writeln('Finished running tests')\n user_input = self._port.host.user.prompt(\n 'Interactive watch mode: (q)uit (r)etry\\n').lower()\n if user_input == 'q' or user_input == 'quit':\n return initial_results, all_retry_results\n\n def _run_test_once(self, tests_to_run, tests_to_skip, should_retry_failures\n ):\n num_workers = int(self._port.num_workers(int(self._options.\n child_processes)))\n initial_results = self._run_tests(tests_to_run, tests_to_skip, self\n ._options.repeat_each, self._options.iterations, 
num_workers)\n should_retry_failures = (should_retry_failures and not\n initial_results.interrupted)\n tests_to_retry = self._tests_to_retry(initial_results)\n all_retry_results = []\n if should_retry_failures and tests_to_retry:\n for retry_attempt in range(1, self._options.num_retries + 1):\n if not tests_to_retry:\n break\n _log.info('')\n _log.info('Retrying %s, attempt %d of %d...', grammar.\n pluralize('unexpected failure', len(tests_to_retry)),\n retry_attempt, self._options.num_retries)\n retry_results = self._run_tests(tests_to_retry,\n tests_to_skip=set(), repeat_each=1, iterations=1,\n num_workers=num_workers, retry_attempt=retry_attempt)\n all_retry_results.append(retry_results)\n tests_to_retry = self._tests_to_retry(retry_results)\n return initial_results, all_retry_results\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = []\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n\n def _collect_tests(self, args):\n return self._finder.find_tests(args, test_lists=self._options.\n test_list, filter_files=self._options.\n isolated_script_test_filter_file, fastest_percentile=self.\n _options.fastest, filters=self._options.isolated_script_test_filter\n )\n\n def _is_http_test(self, test):\n return (test.startswith(self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR) or self._is_websocket_test(test) or self.\n _port.TEST_PATH_SEPARATOR + self.HTTP_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test or self.PERF_SUBDIR + self._port.\n TEST_PATH_SEPARATOR in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names, self.\n _expectations)\n tests_to_run = [test for test in test_names if test not in\n tests_to_skip]\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return TestInput(test_file, self._options.slow_timeout_ms if self.\n _test_is_slow(test_file) else self._options.timeout_ms, self.\n _test_requires_lock(test_file), retry_attempt=retry_attempt)\n\n def _test_requires_lock(self, test_file):\n \"\"\"Returns True if the test needs to be locked when running multiple\n instances of this test runner.\n\n Perf tests are locked because heavy load caused by running other\n tests in parallel might cause some of them to time out.\n \"\"\"\n return self._is_perf_test(test_file)\n\n def _test_is_slow(self, test_file):\n if not self._expectations:\n return False\n is_slow_test = self._expectations.get_expectations(test_file\n ).is_slow_test\n return is_slow_test or self._port.is_slow_wpt_test(test_file)\n\n def _needs_servers(self, test_names):\n return any(self._is_http_test(test_name) for test_name in test_names)\n\n def _set_up_run(self, test_names):\n self._printer.write_update('Checking build ...')\n if self._options.build:\n exit_code = self._port.check_build(self._needs_servers(\n test_names), self._printer)\n if exit_code:\n _log.error('Build check failed')\n return exit_code\n if 
self._options.clobber_old_results:\n self._port.clobber_old_results()\n elif self._filesystem.exists(self._artifacts_directory):\n self._port.limit_archived_results_count()\n self._port.rename_results_folder()\n self._port.host.filesystem.maybe_make_directory(self.\n _artifacts_directory)\n exit_code = self._port.setup_test_run()\n if exit_code:\n _log.error('Build setup failed')\n return exit_code\n if not self._options.nocheck_sys_deps:\n self._printer.write_update('Checking system dependencies ...')\n exit_code = self._port.check_sys_deps()\n if exit_code:\n return exit_code\n return exit_codes.OK_EXIT_STATUS\n\n def _run_tests(self, tests_to_run, tests_to_skip, repeat_each,\n iterations, num_workers, retry_attempt=0):\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n test_inputs.append(self._test_input_for_file(test,\n retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers, retry_attempt)\n\n def _start_servers(self, tests_to_run):\n if any(self._port.is_wpt_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WPTServe ...')\n self._port.start_wptserve()\n self._wptserve_started = True\n if self._port.requires_http_server() or any(self._is_http_test(test\n ) for test in tests_to_run):\n self._printer.write_update('Starting HTTP server ...')\n self._port.start_http_server(additional_dirs={},\n number_of_drivers=self._options.max_locked_shards)\n self._http_server_started = True\n if any(self._is_websocket_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WebSocket server ...')\n self._port.start_websocket_server()\n self._websockets_server_started = True\n\n def _stop_servers(self):\n if self._wptserve_started:\n self._printer.write_update('Stopping WPTServe ...')\n self._wptserve_started = False\n self._port.stop_wptserve()\n if self._http_server_started:\n self._printer.write_update('Stopping HTTP server ...')\n self._http_server_started = False\n self._port.stop_http_server()\n if self._websockets_server_started:\n self._printer.write_update('Stopping WebSocket server ...')\n self._websockets_server_started = False\n self._port.stop_websocket_server()\n\n def _clean_up_run(self):\n _log.debug('Flushing stdout')\n sys.stdout.flush()\n _log.debug('Flushing stderr')\n sys.stderr.flush()\n _log.debug('Cleaning up port')\n self._port.clean_up_test_run()\n if self._sink:\n _log.debug('Closing sink')\n self._sink.close()\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results of the test run.\n start_time: Time the tests started at. 
We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = (self.\n _results_directory)\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if not isinstance(failure, test_failures.FailureCrash\n ) or failure.has_log:\n continue\n crashed_processes.append([test, failure.process_name,\n failure.pid])\n test_to_crash_failure[test] = failure\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(test,\n test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self.\n _results_directory, artifacts_sub_dir, artifact_relative_path)\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact('sample_file', self.\n _filesystem.join(artifacts_sub_dir, artifact_relative_path))\n new_crash_logs = self._port.look_for_new_crash_logs(crashed_processes,\n start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n test_to_crash_failure[test].create_artifacts(test_result.\n artifacts, force_overwrite=True)\n\n def _tests_to_retry(self, run_results):\n return [result.test_name for result in run_results.\n unexpected_results_by_name.values() if result.type !=\n ResultType.Pass]\n\n def _write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results, running_all_tests,\n run_histories):\n _log.debug('Writing JSON files in %s.', self._artifacts_directory)\n times_trie = json_results_generator.test_timings_trie(initial_results\n .results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(self._filesystem.dirname(\n bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n stats_trie = self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_path)\n full_results_jsonp_path = self._filesystem.join(self.\n _artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, full_results_jsonp_path, callback=\n 
'ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self.\n _artifacts_directory, 'failing_results.json')\n json_results_generator.write_json(self._filesystem,\n summarized_failing_results, failing_results_path, callback=\n 'ADD_RESULTS')\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results, self._options.json_test_results)\n if self._options.write_run_histories_to:\n json_results_generator.write_json(self._filesystem,\n run_histories, self._options.write_run_histories_to)\n _log.debug('Finished writing JSON files.')\n\n def _copy_results_html_file(self, destination_dir, filename):\n \"\"\"Copies a file from the template directory to the results directory.\"\"\"\n files_to_copy = [filename, filename + '.version']\n template_dir = self._path_finder.path_from_blink_tools('blinkpy',\n 'web_tests')\n for filename in files_to_copy:\n source_path = self._filesystem.join(template_dir, filename)\n destination_path = self._filesystem.join(destination_dir, filename)\n if self._filesystem.exists(source_path):\n self._filesystem.copyfile(source_path, destination_path)\n\n def _stats_trie(self, initial_results):\n\n def _worker_number(worker_name):\n return int(worker_name.split('/')[1]) if worker_name else -1\n stats = {}\n for result in initial_results.results_by_name.values():\n if result.type != ResultType.Skip:\n stats[result.test_name] = {'results': (_worker_number(\n result.worker_name), result.test_number, result.pid,\n int(result.test_run_time * 1000), int(result.\n total_run_time * 1000))}\n stats_trie = {}\n for name, value in stats.items():\n json_results_generator.add_path_to_trie(name, value, stats_trie)\n return stats_trie\n",
"step-5": "# Copyright (C) 2010 Google Inc. All rights reserved.\n# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"The Manager orchestrates the overall process of running web tests.\n\nThis includes finding tests to run, reading the test expectations,\nstarting the required helper servers, deciding the order and way to\nrun the tests, retrying failed tests, and collecting the test results,\nincluding crash logs and mismatches with expectations.\n\nThe Manager object has a constructor and one main method called run.\n\"\"\"\n\nimport fnmatch\nimport json\nimport logging\nimport os\nimport random\nimport signal\nimport sys\nimport time\n\nfrom blinkpy.common import exit_codes\nfrom blinkpy.common.path_finder import PathFinder\nfrom blinkpy.tool import grammar\nfrom blinkpy.web_tests.controllers.test_result_sink import CreateTestResultSink\nfrom blinkpy.web_tests.controllers.web_test_finder import WebTestFinder\nfrom blinkpy.web_tests.controllers.web_test_runner import WebTestRunner\nfrom blinkpy.web_tests.layout_package import json_results_generator\nfrom blinkpy.web_tests.models import test_expectations\nfrom blinkpy.web_tests.models import test_failures\nfrom blinkpy.web_tests.models import test_run_results\nfrom blinkpy.web_tests.models.typ_types import ResultType\nfrom blinkpy.web_tests.models.test_input import TestInput\n\n_log = logging.getLogger(__name__)\n\nTestExpectations = test_expectations.TestExpectations\n\n\nclass Manager(object):\n \"\"\"A class for managing running a series of web tests.\"\"\"\n\n HTTP_SUBDIR = 'http'\n PERF_SUBDIR = 'perf'\n WEBSOCKET_SUBDIR = 'websocket'\n ARCHIVED_RESULTS_LIMIT = 25\n\n def __init__(self, port, options, printer):\n \"\"\"Initializes test runner data structures.\n\n Args:\n port: An object implementing platform-specific functionality.\n options: An options argument which contains command line options.\n printer: A Printer object to record updates to.\n \"\"\"\n self._port = port\n self._filesystem = port.host.filesystem\n self._options = 
options\n self._printer = printer\n\n self._expectations = None\n self._http_server_started = False\n self._wptserve_started = False\n self._websockets_server_started = False\n\n self._results_directory = self._port.results_directory()\n self._artifacts_directory = self._port.artifacts_directory()\n self._finder = WebTestFinder(self._port, self._options)\n self._path_finder = PathFinder(port.host.filesystem)\n\n self._sink = CreateTestResultSink(self._port)\n self._runner = WebTestRunner(self._options, self._port, self._printer,\n self._results_directory,\n self._test_is_slow, self._sink)\n\n def run(self, args):\n \"\"\"Runs the tests and return a RunDetails object with the results.\"\"\"\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(\n args)\n except IOError:\n # This is raised if --test-list doesn't exist\n return test_run_results.RunDetails(\n exit_code=exit_codes.NO_TESTS_EXIT_STATUS)\n\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n # Restore the test order to user specified order.\n # base.tests() may change the order as it returns tests in the\n # real, external/wpt, virtual order.\n if paths:\n test_names = self._restore_order(paths, test_names)\n\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n\n self._printer.print_found(\n len(all_test_names), len(test_names), len(tests_to_run),\n self._options.repeat_each, self._options.iterations)\n\n # Check to make sure we're not skipping every test.\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n # Keep executing to produce valid (but empty) results.\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n\n if self._options.num_retries is None:\n # If --test-list is passed, or if no test narrowing is specified,\n # default to 3 retries. Otherwise [e.g. 
if tests are being passed by\n # name], default to 0 retries.\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n\n should_retry_failures = self._options.num_retries > 0\n\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run, tests_to_skip,\n should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info(\"Finally stop servers and clean up\")\n self._stop_servers()\n self._clean_up_run()\n\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n\n # Some crash logs can take a long time to be written out so look\n # for new logs after the test run finishes.\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(\n self._port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(\n self._port,\n self._options,\n self._expectations,\n initial_results,\n all_retry_results,\n only_include_failing=True)\n run_histories = test_run_results.test_run_histories(\n self._options, self._expectations, initial_results,\n all_retry_results)\n\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is\n test_run_results.InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if (self._options.show_results\n and (exit_code or initial_results.total_failures)):\n self._port.show_results_html_file(\n self._filesystem.join(self._artifacts_directory,\n 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n\n return test_run_results.RunDetails(exit_code, summarized_full_results,\n summarized_failing_results,\n initial_results, all_retry_results)\n\n def _register_termination_handler(self):\n if self._port.host.platform.is_win():\n signum = signal.SIGBREAK\n else:\n signum = signal.SIGTERM\n signal.signal(signum, self._on_termination)\n\n def _on_termination(self, signum, _frame):\n self._printer.write_update(\n 'Received signal \"%s\" (%d) in %d' %\n (signal.strsignal(signum), signum, os.getpid()))\n raise KeyboardInterrupt\n\n def _run_test_loop(self, tests_to_run, tests_to_skip):\n # Don't show results in a new browser window because we're already\n # printing the link to diffs in the loop\n self._options.show_results = False\n\n while True:\n initial_results, all_retry_results = self._run_test_once(\n tests_to_run, 
tests_to_skip, should_retry_failures=False)\n for name in initial_results.failures_by_name:\n failure = initial_results.failures_by_name[name][0]\n if isinstance(failure, test_failures.FailureTextMismatch):\n full_test_path = self._filesystem.join(\n self._artifacts_directory, name)\n filename, _ = self._filesystem.splitext(full_test_path)\n pretty_diff_path = 'file://' + filename + '-pretty-diff.html'\n self._printer.writeln('Link to pretty diff:')\n self._printer.writeln(pretty_diff_path + '\\n')\n self._printer.writeln('Finished running tests')\n\n user_input = self._port.host.user.prompt(\n 'Interactive watch mode: (q)uit (r)etry\\n').lower()\n\n if user_input == 'q' or user_input == 'quit':\n return (initial_results, all_retry_results)\n\n def _run_test_once(self, tests_to_run, tests_to_skip,\n should_retry_failures):\n num_workers = int(\n self._port.num_workers(int(self._options.child_processes)))\n\n initial_results = self._run_tests(\n tests_to_run, tests_to_skip, self._options.repeat_each,\n self._options.iterations, num_workers)\n\n # Don't retry failures when interrupted by user or failures limit exception.\n should_retry_failures = (should_retry_failures\n and not initial_results.interrupted)\n\n tests_to_retry = self._tests_to_retry(initial_results)\n all_retry_results = []\n if should_retry_failures and tests_to_retry:\n for retry_attempt in range(1, self._options.num_retries + 1):\n if not tests_to_retry:\n break\n\n _log.info('')\n _log.info(\n 'Retrying %s, attempt %d of %d...',\n grammar.pluralize('unexpected failure',\n len(tests_to_retry)), retry_attempt,\n self._options.num_retries)\n\n retry_results = self._run_tests(\n tests_to_retry,\n tests_to_skip=set(),\n repeat_each=1,\n iterations=1,\n num_workers=num_workers,\n retry_attempt=retry_attempt)\n all_retry_results.append(retry_results)\n\n tests_to_retry = self._tests_to_retry(retry_results)\n return (initial_results, all_retry_results)\n\n def _restore_order(self, paths, test_names):\n original_test_names = list(test_names)\n test_names = []\n for path in paths:\n for test in original_test_names:\n if test.startswith(path) or fnmatch.fnmatch(test, path):\n test_names.append(test)\n test_names += list(set(original_test_names) - set(test_names))\n return test_names\n\n def _collect_tests(self, args):\n return self._finder.find_tests(\n args,\n test_lists=self._options.test_list,\n filter_files=self._options.isolated_script_test_filter_file,\n fastest_percentile=self._options.fastest,\n filters=self._options.isolated_script_test_filter)\n\n def _is_http_test(self, test):\n return (\n test.startswith(self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR)\n or self._is_websocket_test(test) or self._port.TEST_PATH_SEPARATOR\n + self.HTTP_SUBDIR + self._port.TEST_PATH_SEPARATOR in test)\n\n def _is_websocket_test(self, test):\n if self._port.should_use_wptserve(test):\n return False\n\n return self.WEBSOCKET_SUBDIR + self._port.TEST_PATH_SEPARATOR in test\n\n def _http_tests(self, test_names):\n return set(test for test in test_names if self._is_http_test(test))\n\n def _is_perf_test(self, test):\n return (self.PERF_SUBDIR == test\n or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test)\n\n def _prepare_lists(self, paths, test_names):\n tests_to_skip = self._finder.skip_tests(paths, test_names,\n self._expectations)\n tests_to_run = [\n test for test in test_names if test not in tests_to_skip\n ]\n\n return tests_to_run, tests_to_skip\n\n def _test_input_for_file(self, test_file, retry_attempt):\n return 
TestInput(\n test_file,\n self._options.slow_timeout_ms\n if self._test_is_slow(test_file) else self._options.timeout_ms,\n self._test_requires_lock(test_file),\n retry_attempt=retry_attempt)\n\n def _test_requires_lock(self, test_file):\n \"\"\"Returns True if the test needs to be locked when running multiple\n instances of this test runner.\n\n Perf tests are locked because heavy load caused by running other\n tests in parallel might cause some of them to time out.\n \"\"\"\n return self._is_perf_test(test_file)\n\n def _test_is_slow(self, test_file):\n if not self._expectations:\n return False\n is_slow_test = self._expectations.get_expectations(\n test_file).is_slow_test\n return is_slow_test or self._port.is_slow_wpt_test(test_file)\n\n def _needs_servers(self, test_names):\n return any(\n self._is_http_test(test_name) for test_name in test_names)\n\n def _set_up_run(self, test_names):\n self._printer.write_update('Checking build ...')\n if self._options.build:\n exit_code = self._port.check_build(\n self._needs_servers(test_names), self._printer)\n if exit_code:\n _log.error('Build check failed')\n return exit_code\n\n if self._options.clobber_old_results:\n self._port.clobber_old_results()\n elif self._filesystem.exists(self._artifacts_directory):\n self._port.limit_archived_results_count()\n # Rename the existing results folder for archiving.\n self._port.rename_results_folder()\n\n # Create the output directory if it doesn't already exist.\n self._port.host.filesystem.maybe_make_directory(\n self._artifacts_directory)\n\n exit_code = self._port.setup_test_run()\n if exit_code:\n _log.error('Build setup failed')\n return exit_code\n\n # Check that the system dependencies (themes, fonts, ...) are correct.\n if not self._options.nocheck_sys_deps:\n self._printer.write_update('Checking system dependencies ...')\n exit_code = self._port.check_sys_deps()\n if exit_code:\n return exit_code\n\n return exit_codes.OK_EXIT_STATUS\n\n def _run_tests(self,\n tests_to_run,\n tests_to_skip,\n repeat_each,\n iterations,\n num_workers,\n retry_attempt=0):\n\n test_inputs = []\n for _ in range(iterations):\n for test in tests_to_run:\n for _ in range(repeat_each):\n test_inputs.append(\n self._test_input_for_file(test, retry_attempt))\n return self._runner.run_tests(self._expectations, test_inputs,\n tests_to_skip, num_workers,\n retry_attempt)\n\n def _start_servers(self, tests_to_run):\n if any(self._port.is_wpt_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WPTServe ...')\n self._port.start_wptserve()\n self._wptserve_started = True\n\n if (self._port.requires_http_server()\n or any(self._is_http_test(test) for test in tests_to_run)):\n self._printer.write_update('Starting HTTP server ...')\n self._port.start_http_server(\n additional_dirs={},\n number_of_drivers=self._options.max_locked_shards)\n self._http_server_started = True\n\n if any(self._is_websocket_test(test) for test in tests_to_run):\n self._printer.write_update('Starting WebSocket server ...')\n self._port.start_websocket_server()\n self._websockets_server_started = True\n\n def _stop_servers(self):\n if self._wptserve_started:\n self._printer.write_update('Stopping WPTServe ...')\n self._wptserve_started = False\n self._port.stop_wptserve()\n if self._http_server_started:\n self._printer.write_update('Stopping HTTP server ...')\n self._http_server_started = False\n self._port.stop_http_server()\n if self._websockets_server_started:\n self._printer.write_update('Stopping WebSocket server ...')\n 
self._websockets_server_started = False\n self._port.stop_websocket_server()\n\n def _clean_up_run(self):\n _log.debug('Flushing stdout')\n sys.stdout.flush()\n _log.debug('Flushing stderr')\n sys.stderr.flush()\n _log.debug('Cleaning up port')\n self._port.clean_up_test_run()\n if self._sink:\n _log.debug('Closing sink')\n self._sink.close()\n\n def _look_for_new_crash_logs(self, run_results, start_time):\n \"\"\"Looks for and writes new crash logs, at the end of the test run.\n\n Since crash logs can take a long time to be written out if the system is\n under stress, do a second pass at the end of the test run.\n\n Args:\n run_results: The results of the test run.\n start_time: Time the tests started at. We're looking for crash\n logs after that time.\n \"\"\"\n crashed_processes = []\n test_to_crash_failure = {}\n\n # reset static variables for Failure type classes\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = self._results_directory\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n\n for test, result in run_results.unexpected_results_by_name.items():\n if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if (not isinstance(failure, test_failures.FailureCrash)\n or failure.has_log):\n continue\n crashed_processes.append(\n [test, failure.process_name, failure.pid])\n test_to_crash_failure[test] = failure\n\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(\n test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self._results_directory,\n artifacts_sub_dir,\n artifact_relative_path)\n self._filesystem.maybe_make_directory(\n self._filesystem.dirname(artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact(\n 'sample_file',\n self._filesystem.join(artifacts_sub_dir,\n artifact_relative_path))\n\n new_crash_logs = self._port.look_for_new_crash_logs(\n crashed_processes, start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n test_to_crash_failure[test].create_artifacts(\n test_result.artifacts, force_overwrite=True)\n\n def _tests_to_retry(self, run_results):\n # TODO(ojan): This should also check that result.type != test_expectations.MISSING\n # since retrying missing expectations is silly. 
But that's a bit tricky since we\n # only consider the last retry attempt for the count of unexpected regressions.\n return [\n result.test_name\n for result in run_results.unexpected_results_by_name.values()\n if result.type != ResultType.Pass\n ]\n\n def _write_json_files(self, summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories):\n _log.debug(\"Writing JSON files in %s.\", self._artifacts_directory)\n\n # FIXME: Upload stats.json to the server and delete times_ms.\n times_trie = json_results_generator.test_timings_trie(\n initial_results.results_by_name.values())\n times_json_path = self._filesystem.join(self._artifacts_directory,\n 'times_ms.json')\n json_results_generator.write_json(self._filesystem, times_trie,\n times_json_path)\n\n # Save out the times data so we can use it for --fastest in the future.\n if running_all_tests:\n bot_test_times_path = self._port.bot_test_times_path()\n self._filesystem.maybe_make_directory(\n self._filesystem.dirname(bot_test_times_path))\n json_results_generator.write_json(self._filesystem, times_trie,\n bot_test_times_path)\n\n stats_trie = self._stats_trie(initial_results)\n stats_path = self._filesystem.join(self._artifacts_directory,\n 'stats.json')\n self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))\n\n full_results_path = self._filesystem.join(self._artifacts_directory,\n 'full_results.json')\n json_results_generator.write_json(\n self._filesystem, summarized_full_results, full_results_path)\n\n full_results_jsonp_path = self._filesystem.join(\n self._artifacts_directory, 'full_results_jsonp.js')\n json_results_generator.write_json(\n self._filesystem,\n summarized_full_results,\n full_results_jsonp_path,\n callback='ADD_FULL_RESULTS')\n failing_results_path = self._filesystem.join(self._artifacts_directory,\n 'failing_results.json')\n # We write failing_results.json out as jsonp because we need to load it\n # from a file url for results.html and Chromium doesn't allow that.\n json_results_generator.write_json(\n self._filesystem,\n summarized_failing_results,\n failing_results_path,\n callback='ADD_RESULTS')\n\n if self._options.json_test_results:\n json_results_generator.write_json(self._filesystem,\n summarized_full_results,\n self._options.json_test_results)\n if self._options.write_run_histories_to:\n json_results_generator.write_json(\n self._filesystem, run_histories,\n self._options.write_run_histories_to)\n\n _log.debug('Finished writing JSON files.')\n\n def _copy_results_html_file(self, destination_dir, filename):\n \"\"\"Copies a file from the template directory to the results directory.\"\"\"\n files_to_copy = [filename, filename + \".version\"]\n template_dir = self._path_finder.path_from_blink_tools(\n 'blinkpy', 'web_tests')\n for filename in files_to_copy:\n source_path = self._filesystem.join(template_dir, filename)\n destination_path = self._filesystem.join(destination_dir, filename)\n # Note that the results.html template file won't exist when\n # we're using a MockFileSystem during unit tests, so make sure\n # it exists before we try to copy it.\n if self._filesystem.exists(source_path):\n self._filesystem.copyfile(source_path, destination_path)\n\n def _stats_trie(self, initial_results):\n def _worker_number(worker_name):\n return int(worker_name.split('/')[1]) if worker_name else -1\n\n stats = {}\n for result in initial_results.results_by_name.values():\n if result.type != ResultType.Skip:\n stats[result.test_name] = {\n 'results': 
(_worker_number(result.worker_name),\n result.test_number, result.pid,\n int(result.test_run_time * 1000),\n int(result.total_run_time * 1000))\n }\n stats_trie = {}\n for name, value in stats.items():\n json_results_generator.add_path_to_trie(name, value, stats_trie)\n return stats_trie\n",
"step-ids": [
14,
20,
25,
31,
33
]
}
|
[
14,
20,
25,
31,
33
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('Registration', '0015_auto_20150525_1815'),
]
operations = [
migrations.AlterField(
model_name='user',
name='created_date',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
migrations.AlterField(
model_name='user',
name='modified_date',
field=models.DateField(auto_now=True),
),
migrations.AlterField(
model_name='user_skills',
name='percentage',
field=models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
),
]
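
# A minimal sketch (hypothetical model name and usage, not part of this
# migration): once it is applied, the new validators on `percentage` reject
# out-of-range values during explicit model validation.
#
#   skill = User_Skills(percentage=150)
#   skill.full_clean()  # raises ValidationError: value must be between 0 and 100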
|
normal
|
{
"blob_id": "7a1be5c9c48413ba1969631e99ecb45cf15ef613",
"index": 559,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Registration', '0015_auto_20150525_1815')]\n operations = [migrations.AlterField(model_name='user', name=\n 'created_date', field=models.DateField(auto_now_add=True)),\n migrations.AlterField(model_name='user', name='last_login', field=\n models.DateTimeField(null=True, verbose_name='last login', blank=\n True)), migrations.AlterField(model_name='user', name=\n 'modified_date', field=models.DateField(auto_now=True)), migrations\n .AlterField(model_name='user_skills', name='percentage', field=\n models.PositiveSmallIntegerField(default=0, validators=[django.core\n .validators.MinValueValidator(0), django.core.validators.\n MaxValueValidator(100)]))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Registration', '0015_auto_20150525_1815')]\n operations = [migrations.AlterField(model_name='user', name=\n 'created_date', field=models.DateField(auto_now_add=True)),\n migrations.AlterField(model_name='user', name='last_login', field=\n models.DateTimeField(null=True, verbose_name='last login', blank=\n True)), migrations.AlterField(model_name='user', name=\n 'modified_date', field=models.DateField(auto_now=True)), migrations\n .AlterField(model_name='user_skills', name='percentage', field=\n models.PositiveSmallIntegerField(default=0, validators=[django.core\n .validators.MinValueValidator(0), django.core.validators.\n MaxValueValidator(100)]))]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Registration', '0015_auto_20150525_1815'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='created_date',\n field=models.DateField(auto_now_add=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='last_login',\n field=models.DateTimeField(null=True, verbose_name='last login', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='modified_date',\n field=models.DateField(auto_now=True),\n ),\n migrations.AlterField(\n model_name='user_skills',\n name='percentage',\n field=models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
''' Load a variety of relevant physical parameters.
All quantities are in atomic units, such that
m_e = 1
e = 1
hbar = 1
1/4\pi\epsilon = 1
'''
import numpy as np
hbar = 1.0                    # reduced Planck constant
m_e = 1.0                     # electron mass
h22m = hbar**2 / (2*m_e)      # hbar^2 / (2 m_e), kinetic-energy prefactor
pi = np.pi
eV = 1/27.21138505            # 1 eV in Hartree
eV_Ha = eV
nm = 18.89726124565           # 1 nm in Bohr radii
kB_eV = 8.6173324e-5          # Boltzmann constant in eV/K
kB = kB_eV * eV_Ha            # Boltzmann constant in Hartree/K
normal
|
{
"blob_id": "f9f835b24aa8fc77109db9e2d89a3f43bcb4b181",
"index": 7079,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhbar = 1.0\nm_e = 1.0\nh22m = hbar ** 2 / (2 * m_e)\npi = np.pi\neV = 1 / 27.21138505\neV_Ha = eV\nnm = 18.89726124565\nkB_eV = 8.6173324e-05\nkB = kB_eV * eV_Ha\n",
"step-3": "<mask token>\nimport numpy as np\nhbar = 1.0\nm_e = 1.0\nh22m = hbar ** 2 / (2 * m_e)\npi = np.pi\neV = 1 / 27.21138505\neV_Ha = eV\nnm = 18.89726124565\nkB_eV = 8.6173324e-05\nkB = kB_eV * eV_Ha\n",
"step-4": "''' Load a variety of relevant physical parameters.\n\nAll quantities are in atomic units, such that\n m_e = 1\n e = 1\n hbar = 1\n 1/4\\pi\\epsilon = 1\n'''\n\nimport numpy as np\n\nhbar = 1.0\nm_e = 1.0\nh22m = hbar**2 / (2*m_e)\npi = np.pi\neV = 1/27.21138505\neV_Ha = eV\nnm = 18.89726124565\n\nkB_eV = 8.6173324e-5\nkB = kB_eV * eV_Ha \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
/home/openerp/production/extra-addons/productivity_analysis/report/productivity_analysis.py
|
normal
|
{
"blob_id": "6531833a4fe57c15c0668cee9015c7d43491427a",
"index": 341,
"step-1": "/home/openerp/production/extra-addons/productivity_analysis/report/productivity_analysis.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Clustering algorithms:
#     kmeans
#     density-based clustering: DBSCAN
#     hierarchical clustering: AgglomerativeClustering
#     spectral clustering: SpectralClustering
#     mini-batch kmeans: MiniBatchKMeans
# Evaluation metric: FMI (Fowlkes-Mallows index)
# Excluded: feature clustering (FeatureAgglomeration), affinity propagation (affinity_propagation), mean shift (MeanShift)
import numpy as np
import sklearn.cluster as cluster
import os
import time
import argparse
import csv
from sklearn import metrics
import sys
sys.path.append('./feature/')
import feature_extraction as fe
def sort_data(data_list):
x_list=[]
y_list=[]
for data in data_list:
x_list.append(data[0])
y_list.append(data[1])
x_array=np.array(x_list)
y_array=np.array(y_list)
return x_array,y_array
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
output=func(*args, **kw)
time_cost=time.time() - local_time
print('{} run time is {}'.format(func.__name__,time_cost))
with open("./cluster/tmp.csv","a+") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([func.__name__,output,time_cost])
return output,time_cost
return wrapper
@print_run_time
def kmeans(train_x,train_y,num_cluster = 5):
    km_cluster = cluster.KMeans(n_clusters=num_cluster)
    km_cluster.fit(train_x)
    # FMI score: compare predicted clusters against the ground-truth labels
    fmi = metrics.fowlkes_mallows_score(train_y,km_cluster.labels_)
    # print("FMI score for kmeans: %f"%(fmi))
return fmi
@print_run_time
def dbscan(train_x,train_y):
    # density-based clustering
    db = cluster.DBSCAN(eps=0.2,min_samples=3)
    db.fit(train_x)
    # FMI score: compare predicted clusters against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,db.labels_)
return fmi
@print_run_time
def AC(train_x,train_y,num_cluster = 5):
    # hierarchical clustering
    ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)
    ac.fit(train_x)
    predicted_labels = ac.fit_predict(train_x)
    # # ARI score (adjusted Rand index), kept for reference:
    # ARI = (metrics.adjusted_rand_score(train_y, predicted_labels))
    # FMI score: compare predicted clusters against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,ac.labels_)
return fmi
# Excluded: cluster.affinity_propagation() is missing its required positional
# argument 'S' (the precomputed similarity matrix) when called this way.
# @print_run_time
# def AP(train_x,train_y):
#     # affinity propagation (AP) clustering
#     ap = cluster.affinity_propagation(preference=-50).fit(train_x)
#     # FMI score: compare predicted clusters against the ground-truth labels
#     fmi = metrics.fowlkes_mallows_score(train_y,ap.labels_)
#     return fmi

# Excluded: mean shift times out on this dataset.
# @print_run_time
# def meanshift(train_x,train_y):
#     # mean shift clustering
#     ms = cluster.MeanShift(bandwidth=2).fit(train_x)
#     # FMI score: compare predicted clusters against the ground-truth labels
#     fmi = metrics.fowlkes_mallows_score(train_y,ms.labels_)
#     return fmi

@print_run_time
def S_C(train_x,train_y,num_cluster = 5):
    # spectral clustering
    sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)
    # FMI score: compare predicted clusters against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,sc.labels_)
return fmi
# Excluded: FeatureAgglomeration clusters features rather than samples, so its
# labels cannot be compared against train_y (inconsistent numbers of samples).
# @print_run_time
# def FA(train_x,train_y,num_cluster = 5):
#     # feature clustering
#     fa = cluster.FeatureAgglomeration(n_clusters=num_cluster).fit(train_x)
#     # FMI score: compare predicted clusters against the ground-truth labels
#     fmi = metrics.fowlkes_mallows_score(train_y,fa.labels_)
#     return fmi
@print_run_time
def MBK(train_x,train_y,num_cluster = 5):
    # mini-batch kmeans
    mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)
    # FMI score: compare predicted clusters against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,mbk.labels_)
return fmi
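
# Hedged illustration (toy labels assumed, never called by the pipeline below):
# the FMI score compares two partitions and ignores how cluster ids are
# numbered, so identical partitions with swapped labels still score 1.0.
def _fmi_sanity_check():
    return metrics.fowlkes_mallows_score([0, 0, 1, 1], [1, 1, 0, 0])  # -> 1.0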
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument("-d", "--dataset", type=str, default="cit-HepPh", help="")
parser.add_argument("-t", "--task", type=int, default=0, help="")
parser.add_argument("-f", "--feature_type", type=int, default=0, help="")
parser.add_argument("-l", "--label_type", type=int, default=2, help="")
parser.add_argument("-s", "--shuffle", type=bool, default=True, help="")
parser.add_argument("-p", "--proportion", type=tuple, default=(0.7, 0.3), help="")
parser.add_argument("-m", "--method", type=str, default='all',choices=['kmeans','dbscan','AC','AP','meanshift','S_C','FA','MBK','all'], help="")
parser.add_argument("-sp", "--save_path", type=str, default='./cluster/result.csv', help="")
args = parser.parse_args()
training_set, validation_set, test_set = fe.get_datasets(dataset=args.dataset, task=args.task,
feature_type=args.feature_type, label_type=args.label_type,
shuffle=args.shuffle, proportion=args.proportion)
train_x,train_y=sort_data(training_set)
val_x,val_y=sort_data(validation_set)
with open("./cluster/tmp.csv","w") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['method','index','time_cost'])
if args.method=='kmeans':
acc = kmeans(train_x,train_y,len(np.unique(train_y)))
elif args.method=='dbscan':
acc = dbscan(train_x,train_y)
elif args.method=='AC':
acc = AC(train_x,train_y,len(np.unique(train_y)))
    elif args.method=='S_C':
        acc = S_C(train_x,train_y,len(np.unique(train_y)))
elif args.method=='MBK':
acc = MBK(train_x,train_y,len(np.unique(train_y)))
elif args.method=='all':
acc_k = kmeans(train_x,train_y,len(np.unique(train_y)))
acc_ac = AC(train_x,train_y,len(np.unique(train_y)))
acc_sc = S_C(train_x,train_y,len(np.unique(train_y)))
# acc_fa = FA(train_x,train_y,len(np.unique(train_y))) ValueError: Found input variables with inconsistent numbers of samples: [7414, 24684]
acc_mbk = MBK(train_x,train_y,len(np.unique(train_y)))
acc_db = dbscan(train_x,train_y)
# acc_ap = AP(train_x,train_y) affinity_propagation() missing 1 required positional argument: 'S'
        # acc_ms = meanshift(train_x,train_y)  # excluded: times out
tmp_path=os.path.abspath('./cluster/tmp.csv')
os.rename('./cluster/tmp.csv',args.save_path)
|
normal
|
{
"blob_id": "aaebd9eba8a5c51c64baaf60224720b87a6364e1",
"index": 1388,
"step-1": "<mask token>\n\n\n@print_run_time\ndef dbscan(train_x, train_y):\n db = cluster.DBSCAN(eps=0.2, min_samples=3)\n db.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)\n return fmi\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sort_data(data_list):\n x_list = []\n y_list = []\n for data in data_list:\n x_list.append(data[0])\n y_list.append(data[1])\n x_array = np.array(x_list)\n y_array = np.array(y_list)\n return x_array, y_array\n\n\ndef print_run_time(func):\n\n def wrapper(*args, **kw):\n local_time = time.time()\n output = func(*args, **kw)\n time_cost = time.time() - local_time\n print('{} run time is {}'.format(func.__name__, time_cost))\n with open('./cluster/tmp.csv', 'a+') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([func.__name__, output, time_cost])\n return output, time_cost\n return wrapper\n\n\n@print_run_time\ndef kmeans(train_x, train_y, num_cluster=5):\n km_cluster = cluster.KMeans(n_clusters=num_cluster)\n km_cluster.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)\n return fmi\n\n\n@print_run_time\ndef dbscan(train_x, train_y):\n db = cluster.DBSCAN(eps=0.2, min_samples=3)\n db.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)\n return fmi\n\n\n@print_run_time\ndef AC(train_x, train_y, num_cluster=5):\n ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)\n ac.fit(train_x)\n predicted_labels = ac.fit_predict(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)\n return fmi\n\n\n@print_run_time\n@print_run_time\ndef S_C(train_x, train_y, num_cluster=5):\n sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)\n return fmi\n\n\n@print_run_time\ndef MBK(train_x, train_y, num_cluster=5):\n mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)\n return fmi\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('./feature/')\n<mask token>\n\n\ndef sort_data(data_list):\n x_list = []\n y_list = []\n for data in data_list:\n x_list.append(data[0])\n y_list.append(data[1])\n x_array = np.array(x_list)\n y_array = np.array(y_list)\n return x_array, y_array\n\n\ndef print_run_time(func):\n\n def wrapper(*args, **kw):\n local_time = time.time()\n output = func(*args, **kw)\n time_cost = time.time() - local_time\n print('{} run time is {}'.format(func.__name__, time_cost))\n with open('./cluster/tmp.csv', 'a+') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([func.__name__, output, time_cost])\n return output, time_cost\n return wrapper\n\n\n@print_run_time\ndef kmeans(train_x, train_y, num_cluster=5):\n km_cluster = cluster.KMeans(n_clusters=num_cluster)\n km_cluster.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)\n return fmi\n\n\n@print_run_time\ndef dbscan(train_x, train_y):\n db = cluster.DBSCAN(eps=0.2, min_samples=3)\n db.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)\n return fmi\n\n\n@print_run_time\ndef AC(train_x, train_y, num_cluster=5):\n ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)\n ac.fit(train_x)\n predicted_labels = ac.fit_predict(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)\n return fmi\n\n\n@print_run_time\n@print_run_time\ndef S_C(train_x, train_y, num_cluster=5):\n sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)\n return fmi\n\n\n@print_run_time\ndef MBK(train_x, train_y, num_cluster=5):\n mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)\n return fmi\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-d', '--dataset', type=str, default='cit-HepPh',\n help='')\n parser.add_argument('-t', '--task', type=int, default=0, help='')\n parser.add_argument('-f', '--feature_type', type=int, default=0, help='')\n parser.add_argument('-l', '--label_type', type=int, default=2, help='')\n parser.add_argument('-s', '--shuffle', type=bool, default=True, help='')\n parser.add_argument('-p', '--proportion', type=tuple, default=(0.7, 0.3\n ), help='')\n parser.add_argument('-m', '--method', type=str, default='all', choices=\n ['kmeans', 'dbscan', 'AC', 'AP', 'meanshift', 'S_C', 'FA', 'MBK',\n 'all'], help='')\n parser.add_argument('-sp', '--save_path', type=str, default=\n './cluster/result.csv', help='')\n args = parser.parse_args()\n training_set, validation_set, test_set = fe.get_datasets(dataset=args.\n dataset, task=args.task, feature_type=args.feature_type, label_type\n =args.label_type, shuffle=args.shuffle, proportion=args.proportion)\n train_x, train_y = sort_data(training_set)\n val_x, val_y = sort_data(validation_set)\n with open('./cluster/tmp.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['method', 'index', 'time_cost'])\n if args.method == 'kmeans':\n acc = kmeans(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'dbscan':\n acc = dbscan(train_x, train_y)\n elif args.method == 'AC':\n acc = AC(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'AP':\n acc = AP(train_x, train_y)\n elif args.method == 'meanshift':\n acc = meanshift(train_x, train_y)\n elif args.method == 'S_C':\n acc = S_C(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'FA':\n acc = FA(train_x, 
train_y, len(np.unique(train_y)))\n elif args.method == 'MBK':\n acc = MBK(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'all':\n acc_k = kmeans(train_x, train_y, len(np.unique(train_y)))\n acc_ac = AC(train_x, train_y, len(np.unique(train_y)))\n acc_sc = S_C(train_x, train_y, len(np.unique(train_y)))\n acc_mbk = MBK(train_x, train_y, len(np.unique(train_y)))\n acc_db = dbscan(train_x, train_y)\n tmp_path = os.path.abspath('./cluster/tmp.csv')\n os.rename('./cluster/tmp.csv', args.save_path)\n",
"step-4": "import numpy as np\nimport sklearn.cluster as cluster\nimport os\nimport time\nimport argparse\nimport csv\nfrom sklearn import metrics\nimport sys\nsys.path.append('./feature/')\nimport feature_extraction as fe\n\n\ndef sort_data(data_list):\n x_list = []\n y_list = []\n for data in data_list:\n x_list.append(data[0])\n y_list.append(data[1])\n x_array = np.array(x_list)\n y_array = np.array(y_list)\n return x_array, y_array\n\n\ndef print_run_time(func):\n\n def wrapper(*args, **kw):\n local_time = time.time()\n output = func(*args, **kw)\n time_cost = time.time() - local_time\n print('{} run time is {}'.format(func.__name__, time_cost))\n with open('./cluster/tmp.csv', 'a+') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([func.__name__, output, time_cost])\n return output, time_cost\n return wrapper\n\n\n@print_run_time\ndef kmeans(train_x, train_y, num_cluster=5):\n km_cluster = cluster.KMeans(n_clusters=num_cluster)\n km_cluster.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)\n return fmi\n\n\n@print_run_time\ndef dbscan(train_x, train_y):\n db = cluster.DBSCAN(eps=0.2, min_samples=3)\n db.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)\n return fmi\n\n\n@print_run_time\ndef AC(train_x, train_y, num_cluster=5):\n ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)\n ac.fit(train_x)\n predicted_labels = ac.fit_predict(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)\n return fmi\n\n\n@print_run_time\n@print_run_time\ndef S_C(train_x, train_y, num_cluster=5):\n sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)\n return fmi\n\n\n@print_run_time\ndef MBK(train_x, train_y, num_cluster=5):\n mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)\n return fmi\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-d', '--dataset', type=str, default='cit-HepPh',\n help='')\n parser.add_argument('-t', '--task', type=int, default=0, help='')\n parser.add_argument('-f', '--feature_type', type=int, default=0, help='')\n parser.add_argument('-l', '--label_type', type=int, default=2, help='')\n parser.add_argument('-s', '--shuffle', type=bool, default=True, help='')\n parser.add_argument('-p', '--proportion', type=tuple, default=(0.7, 0.3\n ), help='')\n parser.add_argument('-m', '--method', type=str, default='all', choices=\n ['kmeans', 'dbscan', 'AC', 'AP', 'meanshift', 'S_C', 'FA', 'MBK',\n 'all'], help='')\n parser.add_argument('-sp', '--save_path', type=str, default=\n './cluster/result.csv', help='')\n args = parser.parse_args()\n training_set, validation_set, test_set = fe.get_datasets(dataset=args.\n dataset, task=args.task, feature_type=args.feature_type, label_type\n =args.label_type, shuffle=args.shuffle, proportion=args.proportion)\n train_x, train_y = sort_data(training_set)\n val_x, val_y = sort_data(validation_set)\n with open('./cluster/tmp.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['method', 'index', 'time_cost'])\n if args.method == 'kmeans':\n acc = kmeans(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'dbscan':\n acc = dbscan(train_x, train_y)\n elif args.method == 'AC':\n acc = AC(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'AP':\n acc = AP(train_x, train_y)\n elif args.method == 'meanshift':\n acc = 
meanshift(train_x, train_y)\n elif args.method == 'S_C':\n acc = S_C(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'FA':\n acc = FA(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'MBK':\n acc = MBK(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'all':\n acc_k = kmeans(train_x, train_y, len(np.unique(train_y)))\n acc_ac = AC(train_x, train_y, len(np.unique(train_y)))\n acc_sc = S_C(train_x, train_y, len(np.unique(train_y)))\n acc_mbk = MBK(train_x, train_y, len(np.unique(train_y)))\n acc_db = dbscan(train_x, train_y)\n tmp_path = os.path.abspath('./cluster/tmp.csv')\n os.rename('./cluster/tmp.csv', args.save_path)\n",
"step-5": "#聚类算法:\n # kmeans\n # 密度聚类:DBSCAN\n # 层次聚类:AgglomerativeClustering \n # 谱聚类:SpectralClustering\n # 分批kmeans:MiniBatchKMeans\n# 评价指标:FMI(Fowlkes–Mallows index)\n# 排除:特征聚类:FeatureAgglomeration# 亲和传播聚类(AP)聚类:affinity_propagation# 偏移均值向量:MeanShift\nimport numpy as np\nimport sklearn.cluster as cluster\nimport os\nimport time\nimport argparse\nimport csv\nfrom sklearn import metrics\nimport sys\nsys.path.append('./feature/')\nimport feature_extraction as fe\n\ndef sort_data(data_list):\n x_list=[]\n y_list=[]\n for data in data_list:\n x_list.append(data[0])\n y_list.append(data[1])\n x_array=np.array(x_list)\n y_array=np.array(y_list)\n return x_array,y_array\n\ndef print_run_time(func): \n def wrapper(*args, **kw): \n local_time = time.time() \n output=func(*args, **kw)\n time_cost=time.time() - local_time\n print('{} run time is {}'.format(func.__name__,time_cost))\n with open(\"./cluster/tmp.csv\",\"a+\") as csvfile: \n writer = csv.writer(csvfile)\n writer.writerow([func.__name__,output,time_cost])\n return output,time_cost\n return wrapper\n\n@print_run_time\ndef kmeans (train_x,train_y,num_cluster = 5):\n km_cluster = cluster.KMeans(n_clusters=num_cluster)\n km_cluster.fit(train_x)\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,km_cluster.labels_)\n # print(\"kmeans的FMI评价分值为:%f\"%(fmi))\n return fmi\n\n@print_run_time\ndef dbscan(train_x,train_y):\n # 密度聚类\n db = cluster.DBSCAN(eps=0.2,min_samples=3)\n db.fit(train_x)\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,db.labels_)\n return fmi\n\n@print_run_time\ndef AC(train_x,train_y,num_cluster = 5):\n # 层次聚类\n ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)\n ac.fit(train_x)\n predicted_labels = ac.fit_predict(train_x)\n # #计算ARI指数\n # ARI = (metrics.adjusted_rand_score(train_y, predicted_labels))\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,ac.labels_)\n \n return fmi\n@print_run_time\n# def AP(train_x,train_y):\n# #亲和传播聚类(AP)聚类\n# ap = cluster.affinity_propagation(preference=-50).fit(train_x)\n\n# #FMI指数:与真实值对比\n# fmi = metrics.fowlkes_mallows_score(train_y,ap.labels_)\n# return fmi \n\n# @print_run_time\n# def meanshift(train_x,train_y):\n# #偏移均值向量(meanshift)\n# ms = cluster.MeanShift(bandwidth=2).fit(train_x)\n\n# #FMI指数:与真实值对比\n# fmi = metrics.fowlkes_mallows_score(train_y,ms.labels_)\n# return fmi\n\n@print_run_time\ndef S_C(train_x,train_y,num_cluster = 5):\n #谱聚类\n sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,sc.labels_)\n return fmi\n\n# @print_run_time\n# def FA(train_x,train_y,num_cluster = 5):\n# #特征聚类\n# fa = cluster.FeatureAgglomeration(n_clusters=num_cluster).fit(train_x)\n\n# #FMI指数:与真实值对比\n# fmi = metrics.fowlkes_mallows_score(train_y,fa.labels_)\n# return fmi\n\n@print_run_time\ndef MBK(train_x,train_y,num_cluster = 5):\n #分批kmeans\n mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,mbk.labels_)\n return fmi\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\"-d\", \"--dataset\", type=str, default=\"cit-HepPh\", help=\"\")\n parser.add_argument(\"-t\", \"--task\", type=int, default=0, help=\"\")\n parser.add_argument(\"-f\", \"--feature_type\", type=int, default=0, help=\"\")\n parser.add_argument(\"-l\", \"--label_type\", type=int, default=2, help=\"\")\n parser.add_argument(\"-s\", \"--shuffle\", type=bool, default=True, 
help=\"\")\n parser.add_argument(\"-p\", \"--proportion\", type=tuple, default=(0.7, 0.3), help=\"\")\n parser.add_argument(\"-m\", \"--method\", type=str, default='all',choices=['kmeans','dbscan','AC','AP','meanshift','S_C','FA','MBK','all'], help=\"\")\n parser.add_argument(\"-sp\", \"--save_path\", type=str, default='./cluster/result.csv', help=\"\") \n args = parser.parse_args()\n\n training_set, validation_set, test_set = fe.get_datasets(dataset=args.dataset, task=args.task,\n feature_type=args.feature_type, label_type=args.label_type,\n shuffle=args.shuffle, proportion=args.proportion)\n train_x,train_y=sort_data(training_set)\n val_x,val_y=sort_data(validation_set)\n\n with open(\"./cluster/tmp.csv\",\"w\") as csvfile: \n writer = csv.writer(csvfile)\n writer.writerow(['method','index','time_cost'])\n\n if args.method=='kmeans':\n acc = kmeans(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='dbscan':\n acc = dbscan(train_x,train_y)\n elif args.method=='AC':\n acc = AC(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='AP':\n acc = AP(train_x,train_y)\n elif args.method=='meanshift':\n acc = meanshift(train_x,train_y)\n elif args.method=='S_C':\n acc = S_C(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='FA':\n acc = FA(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='MBK':\n acc = MBK(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='all':\n acc_k = kmeans(train_x,train_y,len(np.unique(train_y)))\n acc_ac = AC(train_x,train_y,len(np.unique(train_y)))\n acc_sc = S_C(train_x,train_y,len(np.unique(train_y)))\n # acc_fa = FA(train_x,train_y,len(np.unique(train_y))) ValueError: Found input variables with inconsistent numbers of samples: [7414, 24684]\n acc_mbk = MBK(train_x,train_y,len(np.unique(train_y)))\n acc_db = dbscan(train_x,train_y)\n # acc_ap = AP(train_x,train_y) affinity_propagation() missing 1 required positional argument: 'S'\n # acc_ms = meanshift(train_x,train_y) timesout\n \n\n tmp_path=os.path.abspath('./cluster/tmp.csv')\n os.rename('./cluster/tmp.csv',args.save_path)\n",
"step-ids": [
1,
7,
8,
9,
10
]
}
|
[
1,
7,
8,
9,
10
] |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
TileMapScalePlugin
A QGIS plugin
Let you add tiled datasets (GDAL WMS) and shows them in the correct scale.
-------------------
begin : 2014-03-03
copyright : (C) 2014 by Matthias Ludwig - Datalyze Solutions
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
# load TileMapScalePlugin class from file TileMapScalePlugin
from .tilemapscaleplugin import TileMapScalePlugin
return TileMapScalePlugin(iface)
|
normal
|
{
"blob_id": "f2e2ebd5b848cf3a01b7304e5e194beb3eec1c10",
"index": 1214,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef classFactory(iface):\n from .tilemapscaleplugin import TileMapScalePlugin\n return TileMapScalePlugin(iface)\n",
"step-3": "# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n TileMapScalePlugin\n A QGIS plugin\n Let you add tiled datasets (GDAL WMS) and shows them in the correct scale.\n -------------------\n begin : 2014-03-03\n copyright : (C) 2014 by Matthias Ludwig - Datalyze Solutions\n email : [email protected]\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n This script initializes the plugin, making it known to QGIS.\n\"\"\"\n\ndef classFactory(iface):\n # load TileMapScalePlugin class from file TileMapScalePlugin\n from .tilemapscaleplugin import TileMapScalePlugin\n return TileMapScalePlugin(iface)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/python
"""
Expression Parser Tree for fully parenthesized input expression
"""
from bintree import BinaryTree
from stackModule import Stack
def buildParseTree(expression):
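    # assumes a fully parenthesized expression with whitespace-separated tokens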
expList = expression.split()
empTree = BinaryTree('')
parentStack = Stack()
parentStack.push(empTree)
currentNode = empTree
for item in expList:
if item == '(':
currentNode.insertLeft('')
parentStack.push(currentNode)
currentNode = currentNode.getLeftChild()
elif item not in ['+', '-', '*', '/', ')']:
currentNode.setRootValue(int(item))
currentNode = parentStack.pop()
elif item in ['+', '-', '*', '/']:
currentNode.setRootValue(item)
currentNode.insertRight('')
parentStack.push(currentNode)
currentNode = currentNode.getRightChild()
elif item == ')':
currentNode = parentStack.pop()
else:
raise ValueError
return empTree
import operator
def evaluate(parseTree):
opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}
leftC = parseTree.getLeftChild()
rightC = parseTree.getRightChild()
if leftC and rightC:
fn = opers[parseTree.getRootValue()]
return fn(evaluate(leftC),evaluate(rightC))
else:
return parseTree.getRootValue()
def postOrderTraversal(parseTree):
if parseTree != None:
postOrderTraversal(parseTree.getLeftChild())
postOrderTraversal(parseTree.getRightChild())
print parseTree.getRootValue()
def preOrderTraversal(parseTree):
if parseTree !=None:
print parseTree.getRootValue()
preOrderTraversal(parseTree.getLeftChild())
preOrderTraversal(parseTree.getRightChild())
def inOrderTraversal(parseTree):
if parseTree !=None:
inOrderTraversal(parseTree.getLeftChild())
print parseTree.getRootValue()
inOrderTraversal(parseTree.getRightChild())
def iterInOrder(currentTree):
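    # iterative in-order traversal: an explicit stack replaces the recursion used above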
pStack = Stack()
print "\nPrinting in order traversal\n"
while currentTree != None or not pStack.isEmpty():
if currentTree !=None:
pStack.push(currentTree)
currentTree = currentTree.getLeftChild()
else:
currentTree = pStack.pop()
print currentTree.getRootValue()
currentTree = currentTree.getRightChild()
pt = buildParseTree("( ( 10 + 5 ) * 3 )")
print "\nGiven Expression evaluates to %d\n" % evaluate(pt)
preOrderTraversal(pt)
postOrderTraversal(pt)
inOrderTraversal(pt)
iterInOrder(pt)
|
normal
|
{
"blob_id": "e18ebf961c2daa7dd127d08f85edb6ea519e3470",
"index": 8359,
"step-1": "#!/usr/bin/python\n\n\"\"\"\nExpression Parser Tree for fully parenthesized input expression\n\"\"\"\n\nfrom bintree import BinaryTree\nfrom stackModule import Stack\n\ndef buildParseTree(expression):\n expList = expression.split()\n empTree = BinaryTree('')\n parentStack = Stack()\n parentStack.push(empTree)\n currentNode = empTree\n\n for item in expList:\n if item == '(':\n currentNode.insertLeft('')\n parentStack.push(currentNode)\n currentNode = currentNode.getLeftChild()\n elif item not in ['+', '-', '*', '/', ')']:\n currentNode.setRootValue(int(item))\n currentNode = parentStack.pop()\n elif item in ['+', '-', '*', '/']:\n currentNode.setRootValue(item)\n currentNode.insertRight('')\n parentStack.push(currentNode)\n currentNode = currentNode.getRightChild()\n elif item == ')':\n currentNode = parentStack.pop()\n else:\n raise ValueError\n return empTree\n\nimport operator\n\ndef evaluate(parseTree):\n opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}\n\n leftC = parseTree.getLeftChild()\n rightC = parseTree.getRightChild()\n\n if leftC and rightC:\n fn = opers[parseTree.getRootValue()]\n return fn(evaluate(leftC),evaluate(rightC))\n else:\n return parseTree.getRootValue()\n\ndef postOrderTraversal(parseTree):\n\n if parseTree != None:\n postOrderTraversal(parseTree.getLeftChild())\n postOrderTraversal(parseTree.getRightChild())\n print parseTree.getRootValue()\n\ndef preOrderTraversal(parseTree):\n\n if parseTree !=None:\n print parseTree.getRootValue()\n preOrderTraversal(parseTree.getLeftChild())\n preOrderTraversal(parseTree.getRightChild())\n\ndef inOrderTraversal(parseTree):\n\n if parseTree !=None:\n inOrderTraversal(parseTree.getLeftChild())\n print parseTree.getRootValue()\n inOrderTraversal(parseTree.getRightChild())\n\n\ndef iterInOrder(currentTree):\n pStack = Stack()\n print \"\\nPrinting in order traversal\\n\"\n while currentTree != None or not pStack.isEmpty():\n if currentTree !=None:\n pStack.push(currentTree)\n currentTree = currentTree.getLeftChild()\n else:\n currentTree = pStack.pop()\n print currentTree.getRootValue()\n currentTree = currentTree.getRightChild()\n\n\npt = buildParseTree(\"( ( 10 + 5 ) * 3 )\")\nprint \"\\nGiven Expression evaluates to %d\\n\" % evaluate(pt)\npreOrderTraversal(pt)\npostOrderTraversal(pt)\ninOrderTraversal(pt)\niterInOrder(pt)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from editor.editor import Editor
e = Editor()
e.showWindow()
|
normal
|
{
"blob_id": "46d6771fd9f589e2498cd019ba72232cbda06e5a",
"index": 3108,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ne.showWindow()\n",
"step-3": "<mask token>\ne = Editor()\ne.showWindow()\n",
"step-4": "from editor.editor import Editor\ne = Editor()\ne.showWindow()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
model = ResNet50(weights='imagenet', # Learned weights on imagenet
include_top=True)
img_input = image.load_img('my_picture.jpg', target_size=(224, 224))
img_input = image.img_to_array(img_input)
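# preprocess_input expects a batch of images, so np.newaxis adds a leading batch axis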
img_input = preprocess_input(img_input[np.newaxis, ...])
preds = model.predict(img_input)
decoded_predictions = decode_predictions(preds, top=10)[0]
print(decoded_predictions)
|
normal
|
{
"blob_id": "1af6e66c19078a9ee971f608daa93247911d8406",
"index": 5881,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(decoded_predictions)\n",
"step-3": "<mask token>\nmodel = ResNet50(weights='imagenet', include_top=True)\nimg_input = image.load_img('my_picture.jpg', target_size=(224, 224))\nimg_input = image.img_to_array(img_input)\nimg_input = preprocess_input(img_input[np.newaxis, ...])\npreds = model.predict(img_input)\ndecoded_predictions = decode_predictions(preds, top=10)[0]\nprint(decoded_predictions)\n",
"step-4": "from tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions\nimport numpy as np\nmodel = ResNet50(weights='imagenet', include_top=True)\nimg_input = image.load_img('my_picture.jpg', target_size=(224, 224))\nimg_input = image.img_to_array(img_input)\nimg_input = preprocess_input(img_input[np.newaxis, ...])\npreds = model.predict(img_input)\ndecoded_predictions = decode_predictions(preds, top=10)[0]\nprint(decoded_predictions)\n",
"step-5": "from tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions\nimport numpy as np\n\nmodel = ResNet50(weights='imagenet', # Learned weights on imagenet\n include_top=True)\n\nimg_input = image.load_img('my_picture.jpg', target_size=(224, 224))\nimg_input = image.img_to_array(img_input)\nimg_input = preprocess_input(img_input[np.newaxis, ...])\n\npreds = model.predict(img_input)\ndecoded_predictions = decode_predictions(preds, top=10)[0]\n\nprint(decoded_predictions)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import generic
name = __name__
def options(opt):
generic._options(opt, name)
def configure(cfg):
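    # waf configure hook: checks for the czmq header/library (mandatory) and its pkg-config entry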
generic._configure(cfg, name, incs=('czmq.h',), libs=('czmq',), pcname=
name.lower(), uses='LIBZMQ', mandatory=True)
|
normal
|
{
"blob_id": "9e511c769f6ccedc06845a382171fb3729913d05",
"index": 9767,
"step-1": "<mask token>\n\n\ndef options(opt):\n generic._options(opt, name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef options(opt):\n generic._options(opt, name)\n\n\ndef configure(cfg):\n generic._configure(cfg, name, incs=('czmq.h',), libs=('czmq',), pcname=\n name.lower(), uses='LIBZMQ', mandatory=True)\n",
"step-3": "<mask token>\nname = __name__\n\n\ndef options(opt):\n generic._options(opt, name)\n\n\ndef configure(cfg):\n generic._configure(cfg, name, incs=('czmq.h',), libs=('czmq',), pcname=\n name.lower(), uses='LIBZMQ', mandatory=True)\n",
"step-4": "import generic\nname = __name__\n\n\ndef options(opt):\n generic._options(opt, name)\n\n\ndef configure(cfg):\n generic._configure(cfg, name, incs=('czmq.h',), libs=('czmq',), pcname=\n name.lower(), uses='LIBZMQ', mandatory=True)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
"""
Quick select (randomized selection algorithm)
- based on quick sort (ch8_sorting); used to obtain the ith-smallest element in an unordered list of items (e.g. numbers)
"""
def swap(unsorted_array, a, b):
temp = unsorted_array[a]
unsorted_array[a] = unsorted_array[b]
unsorted_array[b] = temp
def partition(unsorted_array, first_index, last_index):
# these 2 lines added, comparing to quick_sort partition
# => there's only one element in our sublist => return any of the function parameters
if first_index == last_index:
return first_index
    # Choosing the 1st element as the pivot is an arbitrary decision.
    # It often does not yield a good split and subsequently a good partition.
    # However, the ith element will eventually be found.
pivot = unsorted_array[first_index]
pivot_index = first_index
index_of_last_element = last_index
less_than_pivot_index = index_of_last_element
greater_than_pivot_index = first_index + 1
while True:
while unsorted_array[greater_than_pivot_index] < pivot and greater_than_pivot_index < last_index:
greater_than_pivot_index += 1
while unsorted_array[less_than_pivot_index] > pivot and less_than_pivot_index >= first_index:
less_than_pivot_index -= 1
if greater_than_pivot_index < less_than_pivot_index:
            swap(unsorted_array, greater_than_pivot_index, less_than_pivot_index)
else:
break
unsorted_array[pivot_index] = unsorted_array[less_than_pivot_index]
unsorted_array[less_than_pivot_index] = pivot
# returns the pivot index pointed to by less_than_pivot_index
return less_than_pivot_index
# parameters: the index of the first, the last, the ith element
def quick_select_helper(unsorted_array, left, right, k):
    # returns the split index = the position in the unordered list where
    # all elements between left and split-1 are < the element contained in the array at split,
    # while all elements between split+1 and right are greater.
split_point = partition(unsorted_array, left, right)
if split_point == k:
return unsorted_array[k]
# => the kth-smallest item should exist/be found between split+1 and right:
elif split_point < k:
return quick_select_helper(unsorted_array, split_point + 1, right, k)
else:
return quick_select_helper(unsorted_array, left, split_point - 1, k)
def quick_select(unsorted_array, k):
    return quick_select_helper(unsorted_array, 0, len(unsorted_array) - 1, k)
u_array = [43, 3, 20, 4, 89, 77]
quick_select(u_array, 1)
print(u_array)
|
normal
|
{
"blob_id": "f9234741c6356b4677b5d32ffea86549d001c258",
"index": 5625,
"step-1": "<mask token>\n\n\ndef swap(unsorted_array, a, b):\n temp = unsorted_array[a]\n unsorted_array[a] = unsorted_array[b]\n unsorted_array[b] = temp\n\n\n<mask token>\n\n\ndef quick_select_helper(unsorted_array, left, right, k):\n split_point = partition(unsorted_array, left, right)\n if split_point == k:\n return unsorted_array[k]\n elif split_point < k:\n return quick_select_helper(unsorted_array, split_point + 1, right, k)\n else:\n return quick_select_helper(unsorted_array, left, split_point - 1, k)\n\n\ndef quick_select(unsorted_array, k):\n quick_select_helper(unsorted_array, 0, len(unsorted_array) - 1, k)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef swap(unsorted_array, a, b):\n temp = unsorted_array[a]\n unsorted_array[a] = unsorted_array[b]\n unsorted_array[b] = temp\n\n\ndef partition(unsorted_array, first_index, last_index):\n if first_index == last_index:\n return first_index\n pivot = unsorted_array[first_index]\n pivot_index = first_index\n index_of_last_element = last_index\n less_than_pivot_index = index_of_last_element\n greater_than_pivot_index = first_index + 1\n while True:\n while unsorted_array[greater_than_pivot_index\n ] < pivot and greater_than_pivot_index < last_index:\n greater_than_pivot_index += 1\n while unsorted_array[less_than_pivot_index\n ] > pivot and less_than_pivot_index >= first_index:\n less_than_pivot_index -= 1\n if greater_than_pivot_index < less_than_pivot_index:\n temp = unsorted_array[greater_than_pivot_index]\n swap(unsorted_array, greater_than_pivot_index,\n less_than_pivot_index)\n else:\n break\n unsorted_array[pivot_index] = unsorted_array[less_than_pivot_index]\n unsorted_array[less_than_pivot_index] = pivot\n return less_than_pivot_index\n\n\ndef quick_select_helper(unsorted_array, left, right, k):\n split_point = partition(unsorted_array, left, right)\n if split_point == k:\n return unsorted_array[k]\n elif split_point < k:\n return quick_select_helper(unsorted_array, split_point + 1, right, k)\n else:\n return quick_select_helper(unsorted_array, left, split_point - 1, k)\n\n\ndef quick_select(unsorted_array, k):\n quick_select_helper(unsorted_array, 0, len(unsorted_array) - 1, k)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef swap(unsorted_array, a, b):\n temp = unsorted_array[a]\n unsorted_array[a] = unsorted_array[b]\n unsorted_array[b] = temp\n\n\ndef partition(unsorted_array, first_index, last_index):\n if first_index == last_index:\n return first_index\n pivot = unsorted_array[first_index]\n pivot_index = first_index\n index_of_last_element = last_index\n less_than_pivot_index = index_of_last_element\n greater_than_pivot_index = first_index + 1\n while True:\n while unsorted_array[greater_than_pivot_index\n ] < pivot and greater_than_pivot_index < last_index:\n greater_than_pivot_index += 1\n while unsorted_array[less_than_pivot_index\n ] > pivot and less_than_pivot_index >= first_index:\n less_than_pivot_index -= 1\n if greater_than_pivot_index < less_than_pivot_index:\n temp = unsorted_array[greater_than_pivot_index]\n swap(unsorted_array, greater_than_pivot_index,\n less_than_pivot_index)\n else:\n break\n unsorted_array[pivot_index] = unsorted_array[less_than_pivot_index]\n unsorted_array[less_than_pivot_index] = pivot\n return less_than_pivot_index\n\n\ndef quick_select_helper(unsorted_array, left, right, k):\n split_point = partition(unsorted_array, left, right)\n if split_point == k:\n return unsorted_array[k]\n elif split_point < k:\n return quick_select_helper(unsorted_array, split_point + 1, right, k)\n else:\n return quick_select_helper(unsorted_array, left, split_point - 1, k)\n\n\ndef quick_select(unsorted_array, k):\n quick_select_helper(unsorted_array, 0, len(unsorted_array) - 1, k)\n\n\n<mask token>\nquick_select(u_array, 1)\nprint(u_array)\n",
"step-4": "<mask token>\n\n\ndef swap(unsorted_array, a, b):\n temp = unsorted_array[a]\n unsorted_array[a] = unsorted_array[b]\n unsorted_array[b] = temp\n\n\ndef partition(unsorted_array, first_index, last_index):\n if first_index == last_index:\n return first_index\n pivot = unsorted_array[first_index]\n pivot_index = first_index\n index_of_last_element = last_index\n less_than_pivot_index = index_of_last_element\n greater_than_pivot_index = first_index + 1\n while True:\n while unsorted_array[greater_than_pivot_index\n ] < pivot and greater_than_pivot_index < last_index:\n greater_than_pivot_index += 1\n while unsorted_array[less_than_pivot_index\n ] > pivot and less_than_pivot_index >= first_index:\n less_than_pivot_index -= 1\n if greater_than_pivot_index < less_than_pivot_index:\n temp = unsorted_array[greater_than_pivot_index]\n swap(unsorted_array, greater_than_pivot_index,\n less_than_pivot_index)\n else:\n break\n unsorted_array[pivot_index] = unsorted_array[less_than_pivot_index]\n unsorted_array[less_than_pivot_index] = pivot\n return less_than_pivot_index\n\n\ndef quick_select_helper(unsorted_array, left, right, k):\n split_point = partition(unsorted_array, left, right)\n if split_point == k:\n return unsorted_array[k]\n elif split_point < k:\n return quick_select_helper(unsorted_array, split_point + 1, right, k)\n else:\n return quick_select_helper(unsorted_array, left, split_point - 1, k)\n\n\ndef quick_select(unsorted_array, k):\n quick_select_helper(unsorted_array, 0, len(unsorted_array) - 1, k)\n\n\nu_array = [43, 3, 20, 4, 89, 77]\nquick_select(u_array, 1)\nprint(u_array)\n",
"step-5": "\"\"\"\nQuick select (randomized selection algorithm)\n- based on quick sort (ch8_sorting); used to obtain the ith-smallest element in an unordered list of items (e.g.numbers)\n\n\"\"\"\n\n\ndef swap(unsorted_array, a, b):\n temp = unsorted_array[a]\n unsorted_array[a] = unsorted_array[b]\n unsorted_array[b] = temp\n\n\ndef partition(unsorted_array, first_index, last_index):\n # these 2 lines added, comparing to quick_sort partition\n # => there's only one element in our sublist => return any of the function parameters\n if first_index == last_index:\n return first_index\n\n # This choice to make the 1st element the pivot is a random decision.\n # It often does not yield a good split and subsequently a good partition.\n # However, the ith element will eventually be found.\n pivot = unsorted_array[first_index]\n pivot_index = first_index\n index_of_last_element = last_index\n\n less_than_pivot_index = index_of_last_element\n greater_than_pivot_index = first_index + 1\n\n while True:\n\n while unsorted_array[greater_than_pivot_index] < pivot and greater_than_pivot_index < last_index:\n greater_than_pivot_index += 1\n while unsorted_array[less_than_pivot_index] > pivot and less_than_pivot_index >= first_index:\n less_than_pivot_index -= 1\n\n if greater_than_pivot_index < less_than_pivot_index:\n temp = unsorted_array[greater_than_pivot_index]\n swap(unsorted_array, greater_than_pivot_index, less_than_pivot_index)\n else:\n break\n\n unsorted_array[pivot_index] = unsorted_array[less_than_pivot_index]\n unsorted_array[less_than_pivot_index] = pivot\n\n # returns the pivot index pointed to by less_than_pivot_index\n return less_than_pivot_index\n\n\n# parameters: the index of the first, the last, the ith element\ndef quick_select_helper(unsorted_array, left, right, k):\n # returns the split index = the position in the unordered list where\n # all elements between right to split-1 are < the element contained in the array split,\n # while all elements between split+1 to left are greater.\n split_point = partition(unsorted_array, left, right)\n\n if split_point == k:\n return unsorted_array[k]\n\n # => the kth-smallest item should exist/be found between split+1 and right:\n elif split_point < k:\n return quick_select_helper(unsorted_array, split_point + 1, right, k)\n else:\n return quick_select_helper(unsorted_array, left, split_point - 1, k)\n\n\ndef quick_select(unsorted_array, k):\n quick_select_helper(unsorted_array, 0, len(unsorted_array) - 1, k)\n\nu_array = [43, 3, 20, 4, 89, 77]\nquick_select(u_array, 1)\nprint(u_array)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from load_blender_data import pose_spherical
from misc import mse, mse2psnr, to8b
import os
import imageio
import json
import torch
import torch.nn as nn
import numpy as np
import cv2
from torch.utils.data.dataset import Dataset
from torch.utils.data.dataloader import DataLoader
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
class MLP(nn.Module):
def __init__(self, in_ch=2, num_layers=4, num_neurons=256):
super(MLP, self).__init__()
layers = []
layers.append(nn.Linear(in_ch, num_neurons))
layers.append(nn.ReLU())
for i in range(1, num_layers-1):
layers.append(nn.Linear(num_neurons, num_neurons))
layers.append(nn.ReLU())
layers.append(nn.Linear(num_neurons, 3))
layers.append(nn.Sigmoid())
self.linears = nn.ModuleList(layers)
def forward(self, x):
for layer in self.linears:
x = layer(x)
return x
class BlenderDataset(Dataset):
def __init__(self, datadir, split='train', testskip=8):
super(BlenderDataset, self).__init__()
imgs = []
with open(os.path.join(datadir, split+".txt")) as f:
lines = f.readlines()
for i, line in enumerate(lines):
name = line.strip()
pose_path = os.path.join(datadir, name, 'rendering/transforms.json')
with open(pose_path, 'r') as f:
cam_params = json.load(f)['frames']
for cam_param in cam_params:
img_name = cam_param['file_path']
imgs.append(os.path.join(datadir, name, f'rendering/{img_name}.png'))
self.images = imgs
print(f'{split} dataset: {len(self.images)}')
def get_rays_np(self, H, W, focal, c2w):
i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')
dirs = np.stack([(i - W * .5) / focal, -(j - H * .5) / focal, -np.ones_like(i)], -1)
# Rotate ray directions from camera frame to the world frame
rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3],
-1) # dot product, equals to: [c2w.dot(dir) for dir in dirs]
# Translate camera frame's origin to the world frame. It is the origin of all rays.
rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))
return rays_o, rays_d
# def __getitem__(self, idx):
# img = self.images[idx]
# pose = self.poses[idx]
# H, W = img.shape[:2]
# rays_o, rays_d = self.get_rays_np(H, W, self.focal, pose)
# # ret = {'img':img.transpose((2, 0, 1)),
# # 'rays_o': rays_o.transpose((2, 0, 1)),
# # 'rays_d': rays_d.transpose((2, 0, 1))}
# ret = {'img': img,
# 'rays_o': rays_o,
# 'rays_d': rays_d}
# return ret
def get_coords2d(self, H, W):
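        # builds a normalized 2-D pixel-coordinate grid in [0, 1), used as the per-pixel MLP input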
coord = np.linspace(0, 1, H, endpoint=False)
coords = np.stack(np.meshgrid(coord, coord), -1)
return coords
def __getitem__(self, idx):
img_path = self.images[idx]
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR) / 255.
H, W = img.shape[:2]
rays_o = self.get_coords2d(H, W)
ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.float32)}
return ret
def __len__(self):
return len(self.images)
class MLPRunner(object):
def __init__(self, args):
self.basedir = args.basedir
self.expname = args.expname
self.num_layers = 4
self.num_neurons = 256
self.mapping_size = 256
self.num_epoch = 1000 # on average, each image is seen by network num_epoch times
self.val_epoch = 100
self.lr = 1e-4
self.batch_size = args.batch_size
self.num_workers = args.num_workers
self.train_set = BlenderDataset(args.datadir, split='train')
self.train_loader = DataLoader(self.train_set,
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=True)
self.val_set = BlenderDataset(args.datadir, split='val')
self.val_idxs = [i for i in range(len(self.val_set))]
self.i_print = 1000
self.scale = 10
self.in_ch = self.mapping_size * 2
self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)
self.model = MLP(in_ch=self.in_ch)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
def embed(self, x, B):
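        # random Fourier feature mapping: concat(sin(2*pi*x@B^T), cos(2*pi*x@B^T)); identity when B is None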
if B is None:
return x
else:
x_proj = (2. * np.pi * x).matmul(B.transpose(1, 0))
return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)
def train(self):
self.model.to(device)
global_step = 0
for epoch in range(self.num_epoch):
for i, data in enumerate(self.train_loader):
img = data['img'].to(device)
rays_o = data['rays_o'].to(device)
embedding = self.embed(rays_o, self.B_gauss)
embedding = embedding.reshape((-1, embedding.shape[-1]))
img_pred = self.model.forward(embedding)
img_pred = img_pred.reshape(img.shape)
loss = mse(img_pred, img)
psnr = mse2psnr(loss)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if global_step % self.i_print == 0:
print(f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}')
# cv2.imwrite(os.path.join(self.basedir, self.expname, f'train_gt_{epoch}_{global_step}.png'),
# to8b(img[0].detach().cpu().numpy()))
cv2.imwrite(os.path.join(self.basedir, self.expname, f'train_{epoch}_{global_step}.png'),
to8b(img_pred[0].detach().cpu().numpy()))
global_step += 1
if epoch % self.val_epoch == 0:
idx = np.random.choice(self.val_idxs, 1)[0]
data = self.val_set.__getitem__(idx)
img = torch.tensor(data['img']).to(device)
rays_o = torch.tensor(data['rays_o']).to(device)
with torch.no_grad():
embedding = self.embed(rays_o, self.B_gauss)
embedding = embedding.reshape((-1, embedding.shape[-1]))
img_pred = self.model.forward(embedding)
img_pred = img_pred.reshape(img.shape)
loss = mse(img_pred, img)
psnr = mse2psnr(loss)
print(f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}')
# cv2.imwrite(os.path.join(self.basedir, self.expname, f'val_gt_{epoch}_{global_step}.png'),
# to8b(img.detach().cpu().numpy()))
cv2.imwrite(os.path.join(self.basedir, self.expname, f'val_{epoch}_{global_step}.png'),
to8b(img_pred.detach().cpu().numpy()))
|
normal
|
{
"blob_id": "7180dc0d622fd449fcee32f2c50000d05ae2d8bb",
"index": 6850,
"step-1": "<mask token>\n\n\nclass BlenderDataset(Dataset):\n <mask token>\n <mask token>\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n <mask token>\n <mask token>\n\n\nclass MLPRunner(object):\n\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000\n self.val_epoch = 100\n self.lr = 0.0001\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set, batch_size=self.\n batch_size, num_workers=self.num_workers, shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size * 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2.0 * np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if global_step % self.i_print == 0:\n print(\n f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'train_{epoch}_{global_step}.png'), to8b(img_pred[\n 0].detach().cpu().numpy()))\n global_step += 1\n if epoch % self.val_epoch == 0:\n idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n print(\n f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'val_{epoch}_{global_step}.png'), to8b(img_pred.\n detach().cpu().numpy()))\n",
"step-2": "<mask token>\n\n\nclass BlenderDataset(Dataset):\n\n def __init__(self, datadir, split='train', testskip=8):\n super(BlenderDataset, self).__init__()\n imgs = []\n with open(os.path.join(datadir, split + '.txt')) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n name = line.strip()\n pose_path = os.path.join(datadir, name,\n 'rendering/transforms.json')\n with open(pose_path, 'r') as f:\n cam_params = json.load(f)['frames']\n for cam_param in cam_params:\n img_name = cam_param['file_path']\n imgs.append(os.path.join(datadir, name,\n f'rendering/{img_name}.png'))\n self.images = imgs\n print(f'{split} dataset: {len(self.images)}')\n <mask token>\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n\n def __getitem__(self, idx):\n img_path = self.images[idx]\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR\n ) / 255.0\n H, W = img.shape[:2]\n rays_o = self.get_coords2d(H, W)\n ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.\n float32)}\n return ret\n\n def __len__(self):\n return len(self.images)\n\n\nclass MLPRunner(object):\n\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000\n self.val_epoch = 100\n self.lr = 0.0001\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set, batch_size=self.\n batch_size, num_workers=self.num_workers, shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size * 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2.0 * np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if global_step % self.i_print == 0:\n print(\n f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'train_{epoch}_{global_step}.png'), to8b(img_pred[\n 0].detach().cpu().numpy()))\n global_step += 1\n if epoch % self.val_epoch == 0:\n idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr 
= mse2psnr(loss)\n print(\n f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'val_{epoch}_{global_step}.png'), to8b(img_pred.\n detach().cpu().numpy()))\n",
"step-3": "<mask token>\n\n\nclass MLP(nn.Module):\n\n def __init__(self, in_ch=2, num_layers=4, num_neurons=256):\n super(MLP, self).__init__()\n layers = []\n layers.append(nn.Linear(in_ch, num_neurons))\n layers.append(nn.ReLU())\n for i in range(1, num_layers - 1):\n layers.append(nn.Linear(num_neurons, num_neurons))\n layers.append(nn.ReLU())\n layers.append(nn.Linear(num_neurons, 3))\n layers.append(nn.Sigmoid())\n self.linears = nn.ModuleList(layers)\n <mask token>\n\n\nclass BlenderDataset(Dataset):\n\n def __init__(self, datadir, split='train', testskip=8):\n super(BlenderDataset, self).__init__()\n imgs = []\n with open(os.path.join(datadir, split + '.txt')) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n name = line.strip()\n pose_path = os.path.join(datadir, name,\n 'rendering/transforms.json')\n with open(pose_path, 'r') as f:\n cam_params = json.load(f)['frames']\n for cam_param in cam_params:\n img_name = cam_param['file_path']\n imgs.append(os.path.join(datadir, name,\n f'rendering/{img_name}.png'))\n self.images = imgs\n print(f'{split} dataset: {len(self.images)}')\n\n def get_rays_np(self, H, W, focal, c2w):\n i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H,\n dtype=np.float32), indexing='xy')\n dirs = np.stack([(i - W * 0.5) / focal, -(j - H * 0.5) / focal, -np\n .ones_like(i)], -1)\n rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)\n rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))\n return rays_o, rays_d\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n\n def __getitem__(self, idx):\n img_path = self.images[idx]\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR\n ) / 255.0\n H, W = img.shape[:2]\n rays_o = self.get_coords2d(H, W)\n ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.\n float32)}\n return ret\n\n def __len__(self):\n return len(self.images)\n\n\nclass MLPRunner(object):\n\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000\n self.val_epoch = 100\n self.lr = 0.0001\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set, batch_size=self.\n batch_size, num_workers=self.num_workers, shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size * 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2.0 * np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = 
mse2psnr(loss)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if global_step % self.i_print == 0:\n print(\n f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'train_{epoch}_{global_step}.png'), to8b(img_pred[\n 0].detach().cpu().numpy()))\n global_step += 1\n if epoch % self.val_epoch == 0:\n idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n print(\n f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'val_{epoch}_{global_step}.png'), to8b(img_pred.\n detach().cpu().numpy()))\n",
"step-4": "<mask token>\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device(\n 'cpu')\n\n\nclass MLP(nn.Module):\n\n def __init__(self, in_ch=2, num_layers=4, num_neurons=256):\n super(MLP, self).__init__()\n layers = []\n layers.append(nn.Linear(in_ch, num_neurons))\n layers.append(nn.ReLU())\n for i in range(1, num_layers - 1):\n layers.append(nn.Linear(num_neurons, num_neurons))\n layers.append(nn.ReLU())\n layers.append(nn.Linear(num_neurons, 3))\n layers.append(nn.Sigmoid())\n self.linears = nn.ModuleList(layers)\n\n def forward(self, x):\n for layer in self.linears:\n x = layer(x)\n return x\n\n\nclass BlenderDataset(Dataset):\n\n def __init__(self, datadir, split='train', testskip=8):\n super(BlenderDataset, self).__init__()\n imgs = []\n with open(os.path.join(datadir, split + '.txt')) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n name = line.strip()\n pose_path = os.path.join(datadir, name,\n 'rendering/transforms.json')\n with open(pose_path, 'r') as f:\n cam_params = json.load(f)['frames']\n for cam_param in cam_params:\n img_name = cam_param['file_path']\n imgs.append(os.path.join(datadir, name,\n f'rendering/{img_name}.png'))\n self.images = imgs\n print(f'{split} dataset: {len(self.images)}')\n\n def get_rays_np(self, H, W, focal, c2w):\n i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H,\n dtype=np.float32), indexing='xy')\n dirs = np.stack([(i - W * 0.5) / focal, -(j - H * 0.5) / focal, -np\n .ones_like(i)], -1)\n rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)\n rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))\n return rays_o, rays_d\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n\n def __getitem__(self, idx):\n img_path = self.images[idx]\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR\n ) / 255.0\n H, W = img.shape[:2]\n rays_o = self.get_coords2d(H, W)\n ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.\n float32)}\n return ret\n\n def __len__(self):\n return len(self.images)\n\n\nclass MLPRunner(object):\n\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000\n self.val_epoch = 100\n self.lr = 0.0001\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set, batch_size=self.\n batch_size, num_workers=self.num_workers, shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size * 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2.0 * np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = 
embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if global_step % self.i_print == 0:\n print(\n f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'train_{epoch}_{global_step}.png'), to8b(img_pred[\n 0].detach().cpu().numpy()))\n global_step += 1\n if epoch % self.val_epoch == 0:\n idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n print(\n f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}'\n )\n cv2.imwrite(os.path.join(self.basedir, self.expname,\n f'val_{epoch}_{global_step}.png'), to8b(img_pred.\n detach().cpu().numpy()))\n",
"step-5": "from load_blender_data import pose_spherical\nfrom misc import mse, mse2psnr, to8b\n\nimport os\nimport imageio\nimport json\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport cv2\n\n\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data.dataloader import DataLoader\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\nclass MLP(nn.Module):\n def __init__(self, in_ch=2, num_layers=4, num_neurons=256):\n super(MLP, self).__init__()\n layers = []\n layers.append(nn.Linear(in_ch, num_neurons))\n layers.append(nn.ReLU())\n for i in range(1, num_layers-1):\n layers.append(nn.Linear(num_neurons, num_neurons))\n layers.append(nn.ReLU())\n layers.append(nn.Linear(num_neurons, 3))\n layers.append(nn.Sigmoid())\n self.linears = nn.ModuleList(layers)\n\n def forward(self, x):\n for layer in self.linears:\n x = layer(x)\n return x\n\nclass BlenderDataset(Dataset):\n def __init__(self, datadir, split='train', testskip=8):\n super(BlenderDataset, self).__init__()\n imgs = []\n with open(os.path.join(datadir, split+\".txt\")) as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n name = line.strip()\n pose_path = os.path.join(datadir, name, 'rendering/transforms.json')\n with open(pose_path, 'r') as f:\n cam_params = json.load(f)['frames']\n for cam_param in cam_params:\n img_name = cam_param['file_path']\n imgs.append(os.path.join(datadir, name, f'rendering/{img_name}.png'))\n self.images = imgs\n print(f'{split} dataset: {len(self.images)}')\n\n\n def get_rays_np(self, H, W, focal, c2w):\n i, j = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')\n dirs = np.stack([(i - W * .5) / focal, -(j - H * .5) / focal, -np.ones_like(i)], -1)\n # Rotate ray directions from camera frame to the world frame\n rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3],\n -1) # dot product, equals to: [c2w.dot(dir) for dir in dirs]\n # Translate camera frame's origin to the world frame. 
It is the origin of all rays.\n rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))\n return rays_o, rays_d\n\n # def __getitem__(self, idx):\n # img = self.images[idx]\n # pose = self.poses[idx]\n # H, W = img.shape[:2]\n # rays_o, rays_d = self.get_rays_np(H, W, self.focal, pose)\n # # ret = {'img':img.transpose((2, 0, 1)),\n # # 'rays_o': rays_o.transpose((2, 0, 1)),\n # # 'rays_d': rays_d.transpose((2, 0, 1))}\n # ret = {'img': img,\n # 'rays_o': rays_o,\n # 'rays_d': rays_d}\n # return ret\n\n def get_coords2d(self, H, W):\n coord = np.linspace(0, 1, H, endpoint=False)\n coords = np.stack(np.meshgrid(coord, coord), -1)\n return coords\n\n def __getitem__(self, idx):\n img_path = self.images[idx]\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR) / 255.\n H, W = img.shape[:2]\n rays_o = self.get_coords2d(H, W)\n ret = {'img': img.astype(np.float32), 'rays_o': rays_o.astype(np.float32)}\n return ret\n\n def __len__(self):\n return len(self.images)\n\n\nclass MLPRunner(object):\n def __init__(self, args):\n self.basedir = args.basedir\n self.expname = args.expname\n\n self.num_layers = 4\n self.num_neurons = 256\n self.mapping_size = 256\n self.num_epoch = 1000 # on average, each image is seen by network num_epoch times\n self.val_epoch = 100\n self.lr = 1e-4\n\n self.batch_size = args.batch_size\n self.num_workers = args.num_workers\n self.train_set = BlenderDataset(args.datadir, split='train')\n self.train_loader = DataLoader(self.train_set,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n shuffle=True)\n self.val_set = BlenderDataset(args.datadir, split='val')\n self.val_idxs = [i for i in range(len(self.val_set))]\n\n self.i_print = 1000\n self.scale = 10\n self.in_ch = self.mapping_size * 2\n self.B_gauss = torch.randn((self.mapping_size, 2)).to(device)\n self.model = MLP(in_ch=self.in_ch)\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n\n def embed(self, x, B):\n if B is None:\n return x\n else:\n x_proj = (2. 
* np.pi * x).matmul(B.transpose(1, 0))\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], -1)\n\n def train(self):\n self.model.to(device)\n global_step = 0\n for epoch in range(self.num_epoch):\n for i, data in enumerate(self.train_loader):\n img = data['img'].to(device)\n rays_o = data['rays_o'].to(device)\n embedding = self.embed(rays_o, self.B_gauss)\n\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n if global_step % self.i_print == 0:\n print(f'[{epoch} | {global_step}] loss:{loss.item()} psnr:{psnr.item()}')\n # cv2.imwrite(os.path.join(self.basedir, self.expname, f'train_gt_{epoch}_{global_step}.png'),\n # to8b(img[0].detach().cpu().numpy()))\n cv2.imwrite(os.path.join(self.basedir, self.expname, f'train_{epoch}_{global_step}.png'),\n to8b(img_pred[0].detach().cpu().numpy()))\n global_step += 1\n\n if epoch % self.val_epoch == 0:\n idx = np.random.choice(self.val_idxs, 1)[0]\n data = self.val_set.__getitem__(idx)\n img = torch.tensor(data['img']).to(device)\n rays_o = torch.tensor(data['rays_o']).to(device)\n with torch.no_grad():\n embedding = self.embed(rays_o, self.B_gauss)\n\n embedding = embedding.reshape((-1, embedding.shape[-1]))\n img_pred = self.model.forward(embedding)\n img_pred = img_pred.reshape(img.shape)\n loss = mse(img_pred, img)\n psnr = mse2psnr(loss)\n print(f'[{epoch} | val] loss:{loss.item()} psnr:{psnr.item()}')\n # cv2.imwrite(os.path.join(self.basedir, self.expname, f'val_gt_{epoch}_{global_step}.png'),\n # to8b(img.detach().cpu().numpy()))\n cv2.imwrite(os.path.join(self.basedir, self.expname, f'val_{epoch}_{global_step}.png'),\n to8b(img_pred.detach().cpu().numpy()))\n",
"step-ids": [
6,
9,
12,
14,
16
]
}
|
[
6,
9,
12,
14,
16
] |
from django.apps import AppConfig
class LaughsappConfig(AppConfig):
name = 'laughsApp'
|
normal
|
{
"blob_id": "6b785502e8a8983c164ebdffdd304da47c926acb",
"index": 774,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LaughsappConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass LaughsappConfig(AppConfig):\n name = 'laughsApp'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass LaughsappConfig(AppConfig):\n name = 'laughsApp'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from unittest.mock import MagicMock
import pytest
from charpe.mediums.email_handler import EmailHandler
from charpe.errors import InsuficientInformation
def test_send_requirements(config):
handler = EmailHandler(config)
with pytest.raises(InsuficientInformation):
handler.publish({})
with pytest.raises(InsuficientInformation):
handler.publish({
'recipient': '[email protected]',
})
with pytest.raises(InsuficientInformation):
handler.publish({
'recipient': '[email protected]',
'subject': 'The subject',
})
def test_send(config, caplog, mocker):
the_mock = MagicMock()
smoke = MagicMock(return_value=the_mock)
mocker.patch('smtplib.SMTP', new=smoke)
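    # smtplib.SMTP is patched out, so publish() talks to the mock instead of a real server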
handler = EmailHandler(config)
handler.publish({
'recipient': '[email protected]',
'subject': 'The subject',
'data': {
'content': 'El mensaje',
},
})
the_mock.send_message.assert_called_once()
msg = the_mock.send_message.call_args[0][0]
assert msg.get('From') == config['MAIL_DEFAULT_SENDER']
assert msg.get('To') == '[email protected]'
html, text = msg.get_payload()
assert 'El mensaje' in text.get_payload()
assert '<p>El mensaje</p>' in html.get_payload()
the_mock.quit.assert_called_once()
|
normal
|
{
"blob_id": "e2d8a1e13a4162cd606eec12530451ab230c95b6",
"index": 3103,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_send_requirements(config):\n handler = EmailHandler(config)\n with pytest.raises(InsuficientInformation):\n handler.publish({})\n with pytest.raises(InsuficientInformation):\n handler.publish({'recipient': '[email protected]'})\n with pytest.raises(InsuficientInformation):\n handler.publish({'recipient': '[email protected]', 'subject':\n 'The subject'})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_send_requirements(config):\n handler = EmailHandler(config)\n with pytest.raises(InsuficientInformation):\n handler.publish({})\n with pytest.raises(InsuficientInformation):\n handler.publish({'recipient': '[email protected]'})\n with pytest.raises(InsuficientInformation):\n handler.publish({'recipient': '[email protected]', 'subject':\n 'The subject'})\n\n\ndef test_send(config, caplog, mocker):\n the_mock = MagicMock()\n smoke = MagicMock(return_value=the_mock)\n mocker.patch('smtplib.SMTP', new=smoke)\n handler = EmailHandler(config)\n handler.publish({'recipient': '[email protected]', 'subject':\n 'The subject', 'data': {'content': 'El mensaje'}})\n the_mock.send_message.assert_called_once()\n msg = the_mock.send_message.call_args[0][0]\n assert msg.get('From') == config['MAIL_DEFAULT_SENDER']\n assert msg.get('To') == '[email protected]'\n html, text = msg.get_payload()\n assert 'El mensaje' in text.get_payload()\n assert '<p>El mensaje</p>' in html.get_payload()\n the_mock.quit.assert_called_once()\n",
"step-4": "from unittest.mock import MagicMock\nimport pytest\nfrom charpe.mediums.email_handler import EmailHandler\nfrom charpe.errors import InsuficientInformation\n\n\ndef test_send_requirements(config):\n handler = EmailHandler(config)\n with pytest.raises(InsuficientInformation):\n handler.publish({})\n with pytest.raises(InsuficientInformation):\n handler.publish({'recipient': '[email protected]'})\n with pytest.raises(InsuficientInformation):\n handler.publish({'recipient': '[email protected]', 'subject':\n 'The subject'})\n\n\ndef test_send(config, caplog, mocker):\n the_mock = MagicMock()\n smoke = MagicMock(return_value=the_mock)\n mocker.patch('smtplib.SMTP', new=smoke)\n handler = EmailHandler(config)\n handler.publish({'recipient': '[email protected]', 'subject':\n 'The subject', 'data': {'content': 'El mensaje'}})\n the_mock.send_message.assert_called_once()\n msg = the_mock.send_message.call_args[0][0]\n assert msg.get('From') == config['MAIL_DEFAULT_SENDER']\n assert msg.get('To') == '[email protected]'\n html, text = msg.get_payload()\n assert 'El mensaje' in text.get_payload()\n assert '<p>El mensaje</p>' in html.get_payload()\n the_mock.quit.assert_called_once()\n",
"step-5": "from unittest.mock import MagicMock\nimport pytest\n\nfrom charpe.mediums.email_handler import EmailHandler\nfrom charpe.errors import InsuficientInformation\n\n\ndef test_send_requirements(config):\n handler = EmailHandler(config)\n\n with pytest.raises(InsuficientInformation):\n handler.publish({})\n\n with pytest.raises(InsuficientInformation):\n handler.publish({\n 'recipient': '[email protected]',\n })\n\n with pytest.raises(InsuficientInformation):\n handler.publish({\n 'recipient': '[email protected]',\n 'subject': 'The subject',\n })\n\n\ndef test_send(config, caplog, mocker):\n the_mock = MagicMock()\n smoke = MagicMock(return_value=the_mock)\n mocker.patch('smtplib.SMTP', new=smoke)\n\n handler = EmailHandler(config)\n\n handler.publish({\n 'recipient': '[email protected]',\n 'subject': 'The subject',\n 'data': {\n 'content': 'El mensaje',\n },\n })\n\n the_mock.send_message.assert_called_once()\n msg = the_mock.send_message.call_args[0][0]\n\n assert msg.get('From') == config['MAIL_DEFAULT_SENDER']\n assert msg.get('To') == '[email protected]'\n\n html, text = msg.get_payload()\n\n assert 'El mensaje' in text.get_payload()\n assert '<p>El mensaje</p>' in html.get_payload()\n\n the_mock.quit.assert_called_once()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
quotes = [
"Today you are you! That is truer than true! There is no one alive who is you-er than you!",
"Don't cry because it's over. Smile because it happened.",
"You have brains in your head. You have feet in your shoes. You can steer yourself in any direction you choose. You're on your own, and you know what you know. And you are the guy who'll decide where to go.",
"The more that you read, the more things you will know. The more that you learn, the more places you'll go. ",
"I like nonsense; it wakes up the brain cells.",
"Step with care and great tact, and remember that Life's a Great Balancing Act.",
"How did it get so late so soon? Its night before its afternoon. December is here before its June. My goodness how the time has flewn. How did it get so late so soon?",
"Think left and think right and think low and think high. Oh, the thinks you can think up if only you try!",
"A person's a person, no matter how small.",
"You can get help from teachers, but you are going to have to learn a lot by yourself, sitting alone in a room.",
"Unless someone like you cares a whole awful lot, nothing is going to get better. It's not.",
"You're never too old, too wacky, too wild, to pick up a book and read to a child.",
"Today was good. Today was fun. Tomorrow is another one.",
"I meant what I said and I said what I meant.",
"You're in pretty good shape for the shape you are in.",
"Only you can control your future.",
"I am not a consecutive writer.",
"Maybe Christmas, the Grinch thought, doesn't come from a store.",
"Preachers in pulpits talked about what a great message is in the book. No matter what you do, somebody always imputes meaning into your books.",
"Sometimes, when I see my granddaughters make small discoveries of their own, I wish I were a child.",
"Adults are obsolete children.",
"Whenever things go a bit sour in a job I'm doing, I always tell myself, 'You can do better than this.'",
"From there to here, and here to there, funny things are everywhere.",
"I stay out of politics because if I begin thinking too much about politics, I'll probably... drop writing children's books and become a political cartoonist again.",
"I was saving the name of 'Geisel' for the Great American Novel.",
"You make 'em, I amuse 'em."
]
|
normal
|
{
"blob_id": "f9ba944724b262afb39f2859b5726b961536cdf0",
"index": 2092,
"step-1": "<mask token>\n",
"step-2": "quotes = [\n 'Today you are you! That is truer than true! There is no one alive who is you-er than you!'\n , \"Don't cry because it's over. Smile because it happened.\",\n \"You have brains in your head. You have feet in your shoes. You can steer yourself in any direction you choose. You're on your own, and you know what you know. And you are the guy who'll decide where to go.\"\n ,\n \"The more that you read, the more things you will know. The more that you learn, the more places you'll go. \"\n , 'I like nonsense; it wakes up the brain cells.',\n \"Step with care and great tact, and remember that Life's a Great Balancing Act.\"\n ,\n 'How did it get so late so soon? Its night before its afternoon. December is here before its June. My goodness how the time has flewn. How did it get so late so soon?'\n ,\n 'Think left and think right and think low and think high. Oh, the thinks you can think up if only you try!'\n , \"A person's a person, no matter how small.\",\n 'You can get help from teachers, but you are going to have to learn a lot by yourself, sitting alone in a room.'\n ,\n \"Unless someone like you cares a whole awful lot, nothing is going to get better. It's not.\"\n ,\n \"You're never too old, too wacky, too wild, to pick up a book and read to a child.\"\n , 'Today was good. Today was fun. Tomorrow is another one.',\n 'I meant what I said and I said what I meant.',\n \"You're in pretty good shape for the shape you are in.\",\n 'Only you can control your future.', 'I am not a consecutive writer.',\n \"Maybe Christmas, the Grinch thought, doesn't come from a store.\",\n 'Preachers in pulpits talked about what a great message is in the book. No matter what you do, somebody always imputes meaning into your books.'\n ,\n 'Sometimes, when I see my granddaughters make small discoveries of their own, I wish I were a child.'\n , 'Adults are obsolete children.',\n \"Whenever things go a bit sour in a job I'm doing, I always tell myself, 'You can do better than this.'\"\n , 'From there to here, and here to there, funny things are everywhere.',\n \"I stay out of politics because if I begin thinking too much about politics, I'll probably... drop writing children's books and become a political cartoonist again.\"\n , \"I was saving the name of 'Geisel' for the Great American Novel.\",\n \"You make 'em, I amuse 'em.\"]\n",
"step-3": "quotes = [\n\"Today you are you! That is truer than true! There is no one alive who is you-er than you!\",\n\"Don't cry because it's over. Smile because it happened.\",\n\"You have brains in your head. You have feet in your shoes. You can steer yourself in any direction you choose. You're on your own, and you know what you know. And you are the guy who'll decide where to go.\",\n\"The more that you read, the more things you will know. The more that you learn, the more places you'll go. \",\n\"I like nonsense; it wakes up the brain cells.\",\n\"Step with care and great tact, and remember that Life's a Great Balancing Act.\",\n\"How did it get so late so soon? Its night before its afternoon. December is here before its June. My goodness how the time has flewn. How did it get so late so soon?\",\n\"Think left and think right and think low and think high. Oh, the thinks you can think up if only you try!\",\n\"A person's a person, no matter how small.\",\n\"You can get help from teachers, but you are going to have to learn a lot by yourself, sitting alone in a room.\",\n\"Unless someone like you cares a whole awful lot, nothing is going to get better. It's not.\",\n\"You're never too old, too wacky, too wild, to pick up a book and read to a child.\",\n\"Today was good. Today was fun. Tomorrow is another one.\",\n\"I meant what I said and I said what I meant.\", \n\"You're in pretty good shape for the shape you are in.\",\n\"Only you can control your future.\",\n\"I am not a consecutive writer.\",\n\"Maybe Christmas, the Grinch thought, doesn't come from a store.\",\n\"Preachers in pulpits talked about what a great message is in the book. No matter what you do, somebody always imputes meaning into your books.\", \n\"Sometimes, when I see my granddaughters make small discoveries of their own, I wish I were a child.\",\n\"Adults are obsolete children.\",\n\"Whenever things go a bit sour in a job I'm doing, I always tell myself, 'You can do better than this.'\",\n\"From there to here, and here to there, funny things are everywhere.\",\n\"I stay out of politics because if I begin thinking too much about politics, I'll probably... drop writing children's books and become a political cartoonist again.\",\n\"I was saving the name of 'Geisel' for the Great American Novel.\",\n\"You make 'em, I amuse 'em.\"\n]",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.shortcuts import render, HttpResponse, redirect
from .models import Book, Author # This is the models.py Database
# Create your views here.
def main(request):
context = {
"the_books" : Book.objects.all(), #Book Class model.py
}
return render(request, "index.html", context)
def book(request):
Book.objects.create(title = request.POST['b_title'], desc = request.POST['b_desc'])
return redirect('/')
def author(request):
context = {
"the_auths" : Author.objects.all(), #Author Class model.py
}
return render(request, "author.html", context)
def auth(request):
Author.objects.create(first_name = request.POST['a_first'], last_name = request.POST['a_last'], notes = request.POST['a_notes'])
# newA = Author(first_name= "jlkj")
# newA.save()
return redirect('/author')
def authInfo(request, authorid):
context = {
'selectedAuthor' : Author.objects.get(id=authorid)
}
return render(request, "author_info.html", context)
def bookInfo(request, bookid):
context = {
'selectedBook' : Book.objects.get(id=bookid),
'allAuthors' : Author.objects.all()
}
return render(request, "book_info.html", context)
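# Link an existing Author to a Book through the many-to-many "authors" field.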
def authUpdate(request, bookid):
this_book = Book.objects.get(id=bookid)
this_auth = Author.objects.get(id = request.POST['chosenAuth'])
this_book.authors.add(this_auth)
return redirect(f"/bookinfo/{bookid}")
|
normal
|
{
"blob_id": "02bec34b138d53235dc944adeae8ccb8d6b3d340",
"index": 4424,
"step-1": "<mask token>\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\n<mask token>\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-2": "<mask token>\n\n\ndef main(request):\n context = {'the_books': Book.objects.all()}\n return render(request, 'index.html', context)\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\n<mask token>\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-3": "<mask token>\n\n\ndef main(request):\n context = {'the_books': Book.objects.all()}\n return render(request, 'index.html', context)\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\ndef bookInfo(request, bookid):\n context = {'selectedBook': Book.objects.get(id=bookid), 'allAuthors':\n Author.objects.all()}\n return render(request, 'book_info.html', context)\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-4": "from django.shortcuts import render, HttpResponse, redirect\nfrom .models import Book, Author\n\n\ndef main(request):\n context = {'the_books': Book.objects.all()}\n return render(request, 'index.html', context)\n\n\ndef book(request):\n Book.objects.create(title=request.POST['b_title'], desc=request.POST[\n 'b_desc'])\n return redirect('/')\n\n\ndef author(request):\n context = {'the_auths': Author.objects.all()}\n return render(request, 'author.html', context)\n\n\ndef auth(request):\n Author.objects.create(first_name=request.POST['a_first'], last_name=\n request.POST['a_last'], notes=request.POST['a_notes'])\n return redirect('/author')\n\n\ndef authInfo(request, authorid):\n context = {'selectedAuthor': Author.objects.get(id=authorid)}\n return render(request, 'author_info.html', context)\n\n\ndef bookInfo(request, bookid):\n context = {'selectedBook': Book.objects.get(id=bookid), 'allAuthors':\n Author.objects.all()}\n return render(request, 'book_info.html', context)\n\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id=request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f'/bookinfo/{bookid}')\n",
"step-5": "from django.shortcuts import render, HttpResponse, redirect\nfrom .models import Book, Author # This is the models.py Database\n\n# Create your views here.\n\ndef main(request):\n context = {\n \"the_books\" : Book.objects.all(), #Book Class model.py\n }\n return render(request, \"index.html\", context)\n\ndef book(request):\n Book.objects.create(title = request.POST['b_title'], desc = request.POST['b_desc'])\n return redirect('/')\n\ndef author(request):\n context = {\n \"the_auths\" : Author.objects.all(), #Author Class model.py\n }\n return render(request, \"author.html\", context)\n\ndef auth(request):\n Author.objects.create(first_name = request.POST['a_first'], last_name = request.POST['a_last'], notes = request.POST['a_notes'])\n # newA = Author(first_name= \"jlkj\")\n # newA.save()\n return redirect('/author')\n\ndef authInfo(request, authorid):\n context = {\n 'selectedAuthor' : Author.objects.get(id=authorid)\n }\n return render(request, \"author_info.html\", context)\n\ndef bookInfo(request, bookid):\n context = {\n 'selectedBook' : Book.objects.get(id=bookid),\n 'allAuthors' : Author.objects.all()\n }\n return render(request, \"book_info.html\", context)\n\ndef authUpdate(request, bookid):\n this_book = Book.objects.get(id=bookid)\n this_auth = Author.objects.get(id = request.POST['chosenAuth'])\n this_book.authors.add(this_auth)\n return redirect(f\"/bookinfo/{bookid}\")",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from bs4 import BeautifulSoup
import urllib2
import datetime
import re
import csv
import sys
import time
import bb_load as bb_l
import pandas as pd
import requests
#Scrape the web for new buybacks
def scrape_buybacks():
    '''
    () -> None
    Scrape newly announced share buybacks from the web and append any that
    are not already in the local database to scrape_database.csv.
    Version 3.0, MSP @ 11:00 04.06.16
    '''
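    # Overall flow (descriptive summary of the code below): load any existing
    # rows for de-duplication, scrape the five result pages, parse both the
    # odd (".ecoCalContent") and even (".ecoCalAltContent") table rows,
    # append the new rows to the CSV, then re-sort the file by date.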
#Define some of the variables used
start_time = time.time()
stock_list = []
date_list = []
bb_list = []
not_added = int(0)
full_switch = 'y'
#Load reference database by external function
try:
existing_database = read_existing_scrapefile()
print ('Comparing existing database to new buybacks.')
first = existing_database[0]
first_date = first[0:first.find(',')]
full_switch = raw_input('Do a full search beyond the most recent date '\
+'in database? y/n: ')
except (IOError, Warning):
print 'Warning: No prior database available.', '\n' \
'No reference check will be conducted; proceed with a new database file.', '\n'
existing_database = []
first_date = 0
#Run a for loop to scrape all 5 pages of data
for numb in ('1', '2', '3', '4', '5'):
url = ("http://www.rttnews.com/CorpInfo/StockBuybacks.aspx?PageNum=" + numb)
try: #Scrape the page
soup = BeautifulSoup(requests.get(url).content, "html.parser")
except (Warning, IOError): #Inform of any problems
print 'Failed to scrape page number ' + numb + '.' + '\n' \
'The remote host could have terminated the connection.' + '\n' \
'Scraping terminated; try to run the program again.'
sys.exit(0)
end_search = False
#Scrape the relevant info for all announcements in ODD rows
for item in soup.select(".ecoCalContent"):
count = 0
#Scrape the relevant info for an individual announcement
for numb in ["1","2","3","4","5","6"]:
string = ".tblContent" + numb
count = count + 1
start = int(str(item.select(string)).find('">') + 2)
stop = int(str(item.select(string)).find('</'))
extract = str(item.select(string))[start:stop]
if count == 1:
date = extract
y = int(date[date.rfind("/")+1:len(date)])+2000
try:
d = int(date[date.find("/")+1:len(date)-date.find("/")-2])
except ValueError:
d = 1
m = int(date[0:date.find("/")])
date = datetime.datetime(y,m,d).strftime("%Y-%m-%d")
if count == 2:
ticker = extract[extract.find(">")+1:len(extract)]
if ticker.find(",") > 0:
                        while ticker.count(",") > 1: # strip until only one comma left
ticker = ticker[ticker.find(",")+1:len(ticker)] # Strip before first comma
ticker = ticker[0:ticker.find(",")] # Strip after second comma
if ticker.find(".") > 0:
ticker = ticker[0:ticker.find(".")]
ticker = filter(str.isupper, ticker)
if count == 4:
buyback = extract
unit = buyback.join(re.findall("[a-zA-Z]+", buyback))
val = re.findall(r"[-+]?\d*\.\d+|\d+", buyback)
val = float(val[0])
if unit == "":
val = val / 1000000
elif unit == "K":
val = val / 1000
elif unit == "Bln":
val = val * 1000
date_list.append(date)
stock_list.append(ticker)
bb_list.append(val)
#Build the aggregated list and removing buybacks
#already in the existing buyback database
teststr = str(date)+','+str(ticker)+','+str(val)
if teststr in existing_database:
date_list.pop()
stock_list.pop()
bb_list.pop()
not_added = not_added + 1
#Scrape the relevant info for all announcements in EVEN rows
for item in soup.select(".ecoCalAltContent"):
count = 0
#Scrape the relevant info for an individual announcement
for numb in ["1","2","3","4","5","6"]:
string = ".tblContent" + numb
count = count + 1
start = int(str(item.select(string)).find('">') + 2)
stop = int(str(item.select(string)).find('</'))
extract = str(item.select(string))[start:stop]
if count == 1:
date = extract
y = int(date[date.rfind("/")+1:len(date)])+2000
try:
d = int(date[date.find("/")+1:len(date)-date.find("/")-2])
except ValueError:
d = 1
m = int(date[0:date.find("/")])
date = datetime.datetime(y,m,d).strftime("%Y-%m-%d")
if count == 2:
ticker = extract[extract.find(">")+1:len(extract)]
if ticker.find(",") > 0:
                        while ticker.count(",") > 1: # strip until only one comma left
ticker = ticker[ticker.find(",")+1:len(ticker)] # Strip before first comma
ticker = ticker[0:ticker.find(",")] # Strip after second comma
if ticker.find(".") > 0:
ticker = ticker[0:ticker.find(".")]
ticker = filter(str.isupper, ticker)
if count == 4:
buyback = extract
unit = buyback.join(re.findall("[a-zA-Z]+", buyback))
val = re.findall(r"[-+]?\d*\.\d+|\d+", buyback)
val = float(val[0])
if unit == "":
val = val / 1000000
elif unit == "K":
val = val / 1000
elif unit == "Bln":
val = val * 1000
date_list.append(date)
stock_list.append(ticker)
bb_list.append(val)
#Build the aggregated list and removing buybacks
#already in the existing buyback database
teststr = str(date)+','+str(ticker)+','+str(val)
if teststr in existing_database:
date_list.pop()
stock_list.pop()
bb_list.pop()
not_added = not_added + 1
#Make a master list
master = [date_list, stock_list, bb_list]
with open('scrape_database.csv', 'ab') as scrapefile:
file_writer = csv.writer(scrapefile)
for i in range(len(master[0])):
file_writer.writerow([x[i] for x in master])
sort_existing_scrapefile()
print '\n', '---------------------------------------------------------'
print 'MODULE: NEW SHARE BUYBACKS FROM STOCKMAVEN.COM.'
print 'Output: ' + str(len(date_list)) + \
' buyback(s) added to scrape_database.csv.'
print ' ' + str(not_added) + ' buyback(s) scraped but not added to database'
print 'Run-time:', "%.2f" %(time.time() - start_time), 'sec'
print '---------------------------------------------------------' + '\n'
#Read the existing scrapefile into a list for comparison
def read_existing_scrapefile():
'''
    () -> list of str
    Read each row of scrape_database.csv, each row comprising the date,
    ticker, and amount of a buyback, and return the rows as a list of
    strings.
    Precondition: the file scrape_database.csv must be available in
    the root directory.
'''
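    # Each returned string is one raw CSV row, e.g. "2016-06-04,AAPL,50.0"
    # (date, ticker, amount in $ millions) -- the values here are
    # illustrative, not taken from a real file.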
scrape_database = open('scrape_database.csv','r')
line = scrape_database.readline().strip('\n')
existing_database = []
while line !='':
existing_database.append(str(line))
line = scrape_database.readline().strip('\n')
scrape_database.close()
return existing_database
# Sort the existing scrapefile by descending dates
def sort_existing_scrapefile():
'''
Version update: MSP @ 00:12 29.04.14
    () -> None
Sort the buyback database (scrape_database.csv) by descending dates.
'''
c = bb_l.load_buyback_df(-1,-1).T.sort('Date',ascending=False)
d = c.index.tolist()
c['Ticker'] = d
e = c['Date'].tolist()
f = c[['Ticker','Amount']]
f.index = e
f.to_csv('scrape_database.csv', header=False)
|
normal
|
{
"blob_id": "276bcb2e90c30f87c618106e5e862f00d082da34",
"index": 9224,
"step-1": "\r\nfrom bs4 import BeautifulSoup\r\nimport urllib2\r\nimport datetime\r\nimport re\r\nimport csv\r\nimport sys\r\nimport time\r\nimport bb_load as bb_l\r\nimport pandas as pd\r\nimport requests\r\n\r\n#Scrape the web for new buybacks\r\ndef scrape_buybacks():\r\n\r\n '''\r\n\r\n (NoneType) -> scraped_database.csv, database=open('scrape_database.csv', 'r')\r\n\r\n\r\n Version 3.0, MSP @ 11:00 04.06.16\r\n \r\n '''\r\n\r\n\r\n #Define some of the variables used\r\n start_time = time.time()\r\n stock_list = []\r\n date_list = []\r\n bb_list = []\r\n not_added = int(0)\r\n full_switch = 'y'\r\n\r\n #Load reference database by external function\r\n try:\r\n existing_database = read_existing_scrapefile()\r\n print ('Comparing existing database to new buybacks.')\r\n first = existing_database[0]\r\n first_date = first[0:first.find(',')]\r\n full_switch = raw_input('Do a full search beyond the most recent date '\\\r\n +'in database? y/n: ')\r\n except (IOError, Warning):\r\n print 'Warning: No prior database available.', '\\n' \\\r\n 'No reference check will be conducted; proceed with a new database file.', '\\n'\r\n existing_database = []\r\n first_date = 0\r\n \r\n \r\n #Run a for loop to scrape all 5 pages of data\r\n for numb in ('1', '2', '3', '4', '5'):\r\n url = (\"http://www.rttnews.com/CorpInfo/StockBuybacks.aspx?PageNum=\" + numb)\r\n\r\n try: #Scrape the page\r\n soup = BeautifulSoup(requests.get(url).content, \"html.parser\")\r\n\r\n except (Warning, IOError): #Inform of any problems\r\n print 'Failed to scrape page number ' + numb + '.' + '\\n' \\\r\n 'The remote host could have terminated the connection.' + '\\n' \\\r\n 'Scraping terminated; try to run the program again.'\r\n sys.exit(0)\r\n\r\n \r\n end_search = False\r\n\r\n #Scrape the relevant info for all announcements in ODD rows\r\n for item in soup.select(\".ecoCalContent\"):\r\n count = 0\r\n \r\n #Scrape the relevant info for an individual announcement\r\n for numb in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]:\r\n string = \".tblContent\" + numb\r\n count = count + 1\r\n \r\n start = int(str(item.select(string)).find('\">') + 2)\r\n stop = int(str(item.select(string)).find('</'))\r\n \r\n extract = str(item.select(string))[start:stop]\r\n\r\n if count == 1:\r\n date = extract\r\n y = int(date[date.rfind(\"/\")+1:len(date)])+2000\r\n try:\r\n d = int(date[date.find(\"/\")+1:len(date)-date.find(\"/\")-2])\r\n except ValueError:\r\n d = 1\r\n m = int(date[0:date.find(\"/\")])\r\n date = datetime.datetime(y,m,d).strftime(\"%Y-%m-%d\")\r\n \r\n if count == 2:\r\n ticker = extract[extract.find(\">\")+1:len(extract)]\r\n\r\n if ticker.find(\",\") > 0: \r\n while ticker.count(\",\") > 1: # strip until unly one comma left\r\n ticker = ticker[ticker.find(\",\")+1:len(ticker)] # Strip before first comma\r\n ticker = ticker[0:ticker.find(\",\")] # Strip after second comma\r\n if ticker.find(\".\") > 0: \r\n ticker = ticker[0:ticker.find(\".\")]\r\n\r\n ticker = filter(str.isupper, ticker)\r\n \r\n if count == 4:\r\n buyback = extract\r\n unit = buyback.join(re.findall(\"[a-zA-Z]+\", buyback))\r\n val = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", buyback)\r\n val = float(val[0])\r\n\r\n if unit == \"\":\r\n val = val / 1000000\r\n elif unit == \"K\":\r\n val = val / 1000\r\n elif unit == \"Bln\":\r\n val = val * 1000\r\n \r\n date_list.append(date)\r\n stock_list.append(ticker)\r\n bb_list.append(val)\r\n\r\n #Build the aggregated list and removing buybacks\r\n #already in the existing buyback database\r\n\r\n teststr = 
str(date)+','+str(ticker)+','+str(val)\r\n \r\n if teststr in existing_database:\r\n date_list.pop()\r\n stock_list.pop()\r\n bb_list.pop()\r\n not_added = not_added + 1\r\n\r\n #Scrape the relevant info for all announcements in EVEN rows\r\n for item in soup.select(\".ecoCalAltContent\"):\r\n count = 0\r\n \r\n #Scrape the relevant info for an individual announcement\r\n for numb in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\"]:\r\n string = \".tblContent\" + numb\r\n count = count + 1\r\n \r\n start = int(str(item.select(string)).find('\">') + 2)\r\n stop = int(str(item.select(string)).find('</'))\r\n \r\n extract = str(item.select(string))[start:stop]\r\n\r\n if count == 1:\r\n date = extract\r\n y = int(date[date.rfind(\"/\")+1:len(date)])+2000\r\n try:\r\n d = int(date[date.find(\"/\")+1:len(date)-date.find(\"/\")-2])\r\n except ValueError:\r\n d = 1\r\n m = int(date[0:date.find(\"/\")])\r\n date = datetime.datetime(y,m,d).strftime(\"%Y-%m-%d\")\r\n \r\n if count == 2:\r\n ticker = extract[extract.find(\">\")+1:len(extract)]\r\n\r\n if ticker.find(\",\") > 0: \r\n while ticker.count(\",\") > 1: # strip until unly one comma left\r\n ticker = ticker[ticker.find(\",\")+1:len(ticker)] # Strip before first comma\r\n ticker = ticker[0:ticker.find(\",\")] # Strip after second comma\r\n if ticker.find(\".\") > 0: \r\n ticker = ticker[0:ticker.find(\".\")]\r\n\r\n ticker = filter(str.isupper, ticker)\r\n\r\n if count == 4:\r\n buyback = extract\r\n unit = buyback.join(re.findall(\"[a-zA-Z]+\", buyback))\r\n val = re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", buyback)\r\n val = float(val[0])\r\n\r\n if unit == \"\":\r\n val = val / 1000000\r\n elif unit == \"K\":\r\n val = val / 1000\r\n elif unit == \"Bln\":\r\n val = val * 1000\r\n \r\n date_list.append(date)\r\n stock_list.append(ticker)\r\n bb_list.append(val)\r\n\r\n #Build the aggregated list and removing buybacks\r\n #already in the existing buyback database\r\n\r\n teststr = str(date)+','+str(ticker)+','+str(val)\r\n \r\n if teststr in existing_database:\r\n date_list.pop()\r\n stock_list.pop()\r\n bb_list.pop()\r\n not_added = not_added + 1\r\n\r\n #Make a master list \r\n master = [date_list, stock_list, bb_list]\r\n\r\n with open('scrape_database.csv', 'ab') as scrapefile:\r\n file_writer = csv.writer(scrapefile)\r\n\r\n for i in range(len(master[0])):\r\n file_writer.writerow([x[i] for x in master])\r\n\r\n sort_existing_scrapefile()\r\n \r\n print '\\n', '---------------------------------------------------------'\r\n print 'MODULE: NEW SHARE BUYBACKS FROM STOCKMAVEN.COM.'\r\n print 'Output: ' + str(len(date_list)) + \\\r\n ' buyback(s) added to scrape_database.csv.'\r\n print ' ' + str(not_added) + ' buyback(s) scraped but not added to database'\r\n print 'Run-time:', \"%.2f\" %(time.time() - start_time), 'sec'\r\n print '---------------------------------------------------------' + '\\n'\r\n\r\n\r\n#Read the existing scrapefile into a list for comparison\r\ndef read_existing_scrapefile():\r\n\r\n '''\r\n (file open for reading) -> list of str\r\n\r\n Read and return each row in the scrapefile\r\n comprising date, ticker, and amount of a buyback and return\r\n a list of strings containing this information\r\n\r\n Precondition: the file scrapefile.csv must be available in\r\n the root directory\r\n \r\n '''\r\n\r\n scrape_database = open('scrape_database.csv','r')\r\n\r\n line = scrape_database.readline().strip('\\n')\r\n \r\n existing_database = []\r\n \r\n while line !='':\r\n existing_database.append(str(line))\r\n line = 
scrape_database.readline().strip('\\n')\r\n\r\n scrape_database.close()\r\n \r\n return existing_database \r\n\r\n# Sort the existing scrapefile by descending dates\r\ndef sort_existing_scrapefile():\r\n '''\r\n\r\n Version update: MSP @ 00:12 29.04.14\r\n \r\n ( ) -> ( )\r\n\r\n Sort the buyback database (scrape_database.csv) by descending dates.\r\n \r\n '''\r\n\r\n c = bb_l.load_buyback_df(-1,-1).T.sort('Date',ascending=False)\r\n d = c.index.tolist()\r\n c['Ticker'] = d\r\n e = c['Date'].tolist()\r\n f = c[['Ticker','Amount']]\r\n f.index = e\r\n f.to_csv('scrape_database.csv', header=False)\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
# (119ms)
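    # Approach: serialize both trees in pre-order, writing "$" for empty
    # children so the string uniquely encodes each tree's shape, then check
    # whether t's serialization occurs inside s's. Prefixing every value with
    # a comma prevents false matches such as "2" matching inside "12".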
def isSubtree(self, s, t):
"""
:type s: TreeNode
:type t: TreeNode
:rtype: bool
"""
def traverse(root, now):
if not root:
now.append("$")
return
now.append(`root.val`)
traverse(root.left, now)
traverse(root.right, now)
s_list, t_list = [], []
traverse(s, s_list)
traverse(t, t_list)
s_str, t_str= "," + ",".join(s_list), "," + ",".join(t_list)
return t_str in s_str
|
normal
|
{
"blob_id": "5ac4dd62d8e56c7baf38f9fe9f8b4a5034f1cb80",
"index": 192,
"step-1": "# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n# (119ms)\n def isSubtree(self, s, t):\n \"\"\"\n :type s: TreeNode\n :type t: TreeNode\n :rtype: bool\n \"\"\"\n def traverse(root, now):\n if not root:\n now.append(\"$\")\n return\n now.append(`root.val`)\n traverse(root.left, now)\n traverse(root.right, now)\n s_list, t_list = [], []\n traverse(s, s_list)\n traverse(t, t_list)\n s_str, t_str= \",\" + \",\".join(s_list), \",\" + \",\".join(t_list)\n return t_str in s_str",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python3
# The uploader service listens for connections from localhost on port 3961.
# It expects a JSON object on a line by itself as the request. It responds
# with another JSON object on a line by itself, then closes the connection.
# Atropine CGI scripts can send requests to this service to tell it to:
# * Add a tourney to the list of tourneys we're periodically uploading to
# greem.co.uk
# * Remove a tourney from that list (i.e. stop uploading it)
# * Get the upload state of a tourney (are we uploading it, when was the
# last successful upload, was the last upload successful, and if not what
# went wrong)
#
# The service is started with atropine.py, and runs alongside the web server
# which serves the web interface used by the tournament administrator. At
# startup, no tourneys are being uploaded.
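#
# An illustrative request/response exchange (the field values below are made
# up for the example; the shapes follow process_request() further down):
#   -> {"type": "status", "request": {"tourney": "example"}}
#   <- {"success": true, "publishing": false, "viewers": null, ...}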
import sys
import os
import socketserver
from socketserver import BaseRequestHandler
import json
import threading
import time
import http.client
import traceback
http_server_host = "greem.co.uk"
http_server_port = None
http_submit_path = "/cgi-bin/colive/submit.py"
http_delete_path = "/cgi-bin/colive/submit.py"
upload_interval_sec = 10
db_dir = os.getenv("TOURNEYSPATH")
if not db_dir:
db_dir = os.path.join(os.getcwd(), "tourneys")
import tourney2json
import countdowntourney
uploader_thread = None
class FieldNotFoundException(Exception):
pass
def make_error_response(message):
return { "success" : False, "message" : message }
def make_ok_response():
return { "success" : True }
def get_game_state(tourney):
return tourney2json.get_state_for_upload(tourney)
def get_tourney_unique_id(tourney):
return tourney.get_unique_id()
def delete_tourney_from_web(tourney_name, username, password):
req = {
"username" : username,
"password" : password,
"tourney" : tourney_name,
"delete" : True
}
return make_https_json_request(http_server_host, http_server_port, http_delete_path, req)
def read_line_from_socket(sock):
    byte_array = b''
b = 0
while b != b'\n':
b = sock.recv(1)
if b is None or len(b) == 0:
return None
byte_array += b
return byte_array.decode("utf-8")
def make_https_json_request(server_host, server_port, path, request):
post_data = json.dumps(request)
httpcon = None
try:
httpcon = http.client.HTTPSConnection(host=server_host, port=server_port, timeout=30)
httpcon.connect()
except Exception as e:
if httpcon:
httpcon.close()
sys.stderr.write("Failed to connect to %s: %s\r\n" % (server_host, str(e)))
return { "success" : False, "http_failure" : True, "message" : "Failed to connect to %s: %s. Check your internet connection." % (server_host, str(e)) }
try:
while path and path[0] == '/':
path = path[1:]
url = "https://%s%s/%s" % (server_host, (":" + str(server_port)) if server_port else "", path)
httpcon.request("POST", url, post_data)
except ConnectionError as e:
httpcon.close()
sys.stderr.write("Failed to send HTTP request to %s: %s\r\n" % (url, str(e)))
return {
"success" : False,
"http_failure" : True,
"message" : "Failed to upload game state to server %s: %s. Check your internet connection." % (url, str(e))
}
except Exception as e:
httpcon.close()
sys.stderr.write("Failed to send HTTP request to %s: %s\r\n" % (url, str(e)))
return { "success" : False, "http_failure" : True, "message" : str(e) }
try:
response = httpcon.getresponse()
except Exception as e:
sys.stderr.write("Failed to read response from %s: %s\r\n" % (url, str(e)))
httpcon.close()
return { "success" : False, "http_failure" : True, "message" : str(e) }
if response.status != 200:
sys.stderr.write("Failed to post data to %s: HTTP response %d: %s\r\n" % (url, response.status, response.reason))
rep = {
"success" : False,
"http_failure" : True,
"message" : "Failed to post update to server: HTTP %d: %s" % (response.status, response.reason)
}
else:
response_body = None
rep = None
try:
response_body = response.read()
except Exception as e:
sys.stderr.write("Failed to read response data from HTTP: " + str(e) + "\r\n")
rep = {
"success" : False,
"http_failure" : True,
"message" : str(e)
}
if response_body is not None:
try:
rep = json.loads(response_body.decode("utf-8"))
if not rep.get("success", False):
message = rep.get("message", "(none)")
sys.stderr.write("Update failed. Message: " + message + "\r\n")
except Exception as e:
sys.stderr.write("Failed to parse server response: " + str(e) + "\r\n")
rep = {
"success" : False,
"message" : "Server response was invalid JSON: " + str(e)
}
httpcon.close()
return rep
class UploaderThread(object):
def __init__(self):
self.uploading_tourneys = set()
self.tourney_upload_start_time = {}
self.tourney_last_upload_attempt_time = {}
self.tourney_last_uploaded_game_state = {}
self.tourney_num_viewers = {}
self.tourney_auth = {}
self.thread = threading.Thread(target=self.body)
self.thread.daemon = True
self.thread.start()
def is_uploading_tourney(self, tourney):
return (tourney in self.uploading_tourneys)
def add_tourney_to_upload_list(self, tourney, username, password, private):
self.uploading_tourneys.add(tourney)
self.tourney_auth[tourney] = { "username" : username, "password" : password, "private" : private }
        self.tourney_upload_start_time[tourney] = int(time.time())
if tourney in self.tourney_last_uploaded_game_state:
del self.tourney_last_uploaded_game_state[tourney]
self.tourney_last_upload_attempt_time[tourney] = 0
def remove_tourney_from_upload_list(self, tourney):
self.uploading_tourneys.discard(tourney)
def get_last_successful_upload_time(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir) as tourney:
upload_time = tourney.get_last_successful_upload_time()
# Don't return this time if it's before the user even pressed
                # the "start uploading" button.
if upload_time is None or upload_time < self.tourney_upload_start_time.get(tourney_name, 0):
return None
else:
return upload_time
except countdowntourney.TourneyException as e:
sys.stderr.write("Failed to get last successful upload time: %s\n" % (str(e)))
return None
def get_last_failed_upload(self, tourney_name):
try:
with countdowntourney.tourney_open(tourney_name, db_dir) as tourney:
failed_upload = tourney.get_last_failed_upload()
if failed_upload is not None and failed_upload.get("ts", None) is not None and failed_upload["ts"] >= self.tourney_upload_start_time.get(tourney_name, 0):
return failed_upload
else:
return None
except countdowntourney.TourneyException as e:
sys.stderr.write("Failed to get last failed upload info: %s\n" % (str(e)))
return None
def get_num_viewers(self, tourney_name):
return self.tourney_num_viewers.get(tourney_name, None)
def get_tourney_auth(self, tourney):
return self.tourney_auth.get(tourney)
def set_tourney_auth(self, tourney, username, password):
self.tourney_auth[tourney] = { "username" : username, "password" : password }
def get_upload_button_pressed_time(self, tourney):
if tourney not in self.uploading_tourneys:
return None
else:
return self.tourney_upload_start_time.get(tourney, None)
def write_log(self, message):
sys.stderr.write("%s: %s\r\n" % (time.strftime("%Y-%m-%d %H:%M:%S"), message))
def body(self):
while True:
uploading_tourneys = self.uploading_tourneys.copy()
for tourney_name in uploading_tourneys:
now = time.time()
last_upload_time = self.tourney_last_upload_attempt_time.get(tourney_name, 0)
if now >= last_upload_time + upload_interval_sec:
# Upload this tourney to the web if it's been at least
# upload_interval_sec seconds since the previous upload
# attempt.
try:
self.tourney_last_upload_attempt_time[tourney_name] = now
with countdowntourney.tourney_open(tourney_name, db_dir) as tourney:
game_state = get_game_state(tourney)
tourney_unique_id = get_tourney_unique_id(tourney)
auth = self.tourney_auth.get(tourney_name, None)
if auth:
username = auth.get("username")
password = auth.get("password")
private = auth.get("private", False)
else:
username = None
password = None
private = False
req = {
"username" : username,
"password" : password,
"private" : private,
"unique_id" : tourney_unique_id,
"tourney" : tourney_name
}
# If the game state has changed since the last time
# we did a successful upload, include the new game
# state, otherwise we just submit a null update
# which only checks the server still works and
# reads how many current visitors there are.
if tourney_name not in self.tourney_last_uploaded_game_state or game_state != self.tourney_last_uploaded_game_state[tourney_name]:
req["state"] = game_state
# Send the submission to the server & get the reply
rep = make_https_json_request(http_server_host, http_server_port, http_submit_path, req)
num_viewers = None
if rep.get("success", False):
self.tourney_last_uploaded_game_state[tourney_name] = game_state
tourney.log_successful_upload()
if "state" in req:
self.write_log("Successfully uploaded state for tourney \"%s\"" % (tourney_name))
else:
self.write_log("No change since last upload of tourney \"%s\"" % (tourney_name))
num_viewers = rep.get("viewers", None)
if num_viewers is not None:
self.write_log("Server reports %d viewer%s." % (num_viewers, "s" if num_viewers != 1 else ""))
else:
if rep.get("http_failure", False):
failure_type = countdowntourney.UPLOAD_FAIL_TYPE_HTTP
else:
failure_type = countdowntourney.UPLOAD_FAIL_TYPE_REJECTED
tourney.log_failed_upload(failure_type, rep.get("message", "(no message)"))
                                self.write_log("Failed to upload state for tourney \"%s\": %s" % (tourney_name, rep.get("message", "(no message)")))
self.tourney_num_viewers[tourney_name] = num_viewers
except countdowntourney.TourneyException as e:
self.write_log("UploaderThread: couldn't open tourney %s: %s" % (tourney_name, str(e)))
traceback.print_tb(e.__traceback__)
continue
except Exception as e:
self.write_log("Uploader thread threw exception: %s" % (str(e)))
traceback.print_tb(e.__traceback__)
continue
time.sleep(1)
class UploaderServiceHandler(BaseRequestHandler):
def get_fields_from_req(self, req, field_names):
field_values = []
for name in field_names:
value = req.get(name, None)
if value is None:
raise FieldNotFoundException()
field_values.append(value)
return tuple(field_values)
def process_request(self, req):
global uploader_thread
req_type = req.get("type", None)
if not req_type:
return make_error_response("Request has no request type")
req_body = req.get("request", None)
if req_body is None:
return make_error_response("Request has no body")
try:
if req_type == "start_uploading":
(tourney, username, password, private) = self.get_fields_from_req(req_body, ["tourney", "username", "password", "private"])
uploader_thread.add_tourney_to_upload_list(tourney, username, password, private)
rep = make_ok_response()
elif req_type == "stop_uploading":
(tourney,) = self.get_fields_from_req(req_body, ["tourney"])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = make_ok_response()
elif req_type == "delete":
(tourney, username, password) = self.get_fields_from_req(req_body, ["tourney", "username", "password"])
uploader_thread.remove_tourney_from_upload_list(tourney)
rep = delete_tourney_from_web(tourney, username, password)
uploader_thread.set_tourney_auth(tourney, username, password)
elif req_type == "status":
(tourney,) = self.get_fields_from_req(req_body, ["tourney"])
rep = { "success" : True }
auth = uploader_thread.get_tourney_auth(tourney)
rep["publishing"] = uploader_thread.is_uploading_tourney(tourney)
rep["viewers"] = uploader_thread.get_num_viewers(tourney)
if auth:
rep["username"] = auth.get("username", None)
rep["password"] = auth.get("password", None)
rep["private"] = auth.get("private", False)
rep["last_successful_upload_time"] = uploader_thread.get_last_successful_upload_time(tourney)
rep["last_failed_upload"] = uploader_thread.get_last_failed_upload(tourney)
rep["upload_button_pressed_time"] = uploader_thread.get_upload_button_pressed_time(tourney)
rep["now"] = int(time.time())
else:
rep = make_error_response("Unrecognised request type")
except FieldNotFoundException:
return make_error_response("Request is not valid for type")
return rep
def handle(self):
# Request is expected to be a JSON object, on a line by itself
line = read_line_from_socket(self.request)
if line is not None:
rep = None
try:
req = json.loads(line)
except Exception as e:
rep = make_error_response("Request is not valid JSON")
if not rep:
rep = self.process_request(req)
self.request.sendall((json.dumps(rep) + "\n").encode("utf-8"))
self.request.close()
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def __init__(self, addr_port, service_handler):
self.allow_reuse_address = True
super().__init__(addr_port, service_handler)
class TourneyUploaderService(object):
def __init__(self, listen_port):
global uploader_thread
self.listen_port = listen_port
self.socket_server = ThreadedTCPServer(("127.0.0.1", listen_port), UploaderServiceHandler)
self.server_thread = threading.Thread(target=self.socket_server.serve_forever)
if not uploader_thread:
uploader_thread = UploaderThread()
self.server_thread.daemon = True
self.server_thread.start()
def shutdown(self):
self.socket_server.shutdown()
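
# Illustrative startup sketch (an assumption for this example -- atropine.py
# normally constructs the service): bring the service up on port 3961, the
# port named in the header comment, and keep the main thread alive since the
# worker threads are daemons.
#
#   if __name__ == "__main__":
#       service = TourneyUploaderService(3961)
#       while True:
#           time.sleep(60)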
|
normal
|
{
"blob_id": "bd202e18cb98efc2b62ce4670fadcf70c35a33cb",
"index": 2529,
"step-1": "<mask token>\n\n\nclass UploaderThread(object):\n <mask token>\n\n def is_uploading_tourney(self, tourney):\n return tourney in self.uploading_tourneys\n <mask token>\n <mask token>\n\n def get_last_successful_upload_time(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n upload_time = tourney.get_last_successful_upload_time()\n if (upload_time is None or upload_time < self.\n tourney_upload_start_time.get(tourney_name, 0)):\n return None\n else:\n return upload_time\n except countdowntourney.TourneyException as e:\n sys.stderr.write(\n 'Failed to get last successful upload time: %s\\n' % str(e))\n return None\n\n def get_last_failed_upload(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n failed_upload = tourney.get_last_failed_upload()\n if failed_upload is not None and failed_upload.get('ts', None\n ) is not None and failed_upload['ts'\n ] >= self.tourney_upload_start_time.get(tourney_name, 0):\n return failed_upload\n else:\n return None\n except countdowntourney.TourneyException as e:\n sys.stderr.write('Failed to get last failed upload info: %s\\n' %\n str(e))\n return None\n\n def get_num_viewers(self, tourney_name):\n return self.tourney_num_viewers.get(tourney_name, None)\n\n def get_tourney_auth(self, tourney):\n return self.tourney_auth.get(tourney)\n\n def set_tourney_auth(self, tourney, username, password):\n self.tourney_auth[tourney] = {'username': username, 'password':\n password}\n\n def get_upload_button_pressed_time(self, tourney):\n if tourney not in self.uploading_tourneys:\n return None\n else:\n return self.tourney_upload_start_time.get(tourney, None)\n\n def write_log(self, message):\n sys.stderr.write('%s: %s\\r\\n' % (time.strftime('%Y-%m-%d %H:%M:%S'),\n message))\n\n def body(self):\n while True:\n uploading_tourneys = self.uploading_tourneys.copy()\n for tourney_name in uploading_tourneys:\n now = time.time()\n last_upload_time = self.tourney_last_upload_attempt_time.get(\n tourney_name, 0)\n if now >= last_upload_time + upload_interval_sec:\n try:\n self.tourney_last_upload_attempt_time[tourney_name\n ] = now\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n game_state = get_game_state(tourney)\n tourney_unique_id = get_tourney_unique_id(tourney)\n auth = self.tourney_auth.get(tourney_name, None)\n if auth:\n username = auth.get('username')\n password = auth.get('password')\n private = auth.get('private', False)\n else:\n username = None\n password = None\n private = False\n req = {'username': username, 'password':\n password, 'private': private, 'unique_id':\n tourney_unique_id, 'tourney': tourney_name}\n if (tourney_name not in self.\n tourney_last_uploaded_game_state or \n game_state != self.\n tourney_last_uploaded_game_state[tourney_name]\n ):\n req['state'] = game_state\n rep = make_https_json_request(http_server_host,\n http_server_port, http_submit_path, req)\n num_viewers = None\n if rep.get('success', False):\n self.tourney_last_uploaded_game_state[\n tourney_name] = game_state\n tourney.log_successful_upload()\n if 'state' in req:\n self.write_log(\n 'Successfully uploaded state for tourney \"%s\"'\n % tourney_name)\n else:\n self.write_log(\n 'No change since last upload of tourney \"%s\"'\n % tourney_name)\n num_viewers = rep.get('viewers', None)\n if num_viewers is not None:\n self.write_log(\n 'Server reports %d viewer%s.' 
% (\n num_viewers, 's' if num_viewers != \n 1 else ''))\n else:\n if rep.get('http_failure', False):\n failure_type = (countdowntourney.\n UPLOAD_FAIL_TYPE_HTTP)\n else:\n failure_type = (countdowntourney.\n UPLOAD_FAIL_TYPE_REJECTED)\n tourney.log_failed_upload(failure_type, rep\n .get('message', '(no message)'))\n self.write_log(\n 'Failed to upload state for tourney \"%s\": %s'\n % (tourney_name, rep.get('message',\n '(no message')))\n self.tourney_num_viewers[tourney_name\n ] = num_viewers\n except countdowntourney.TourneyException as e:\n self.write_log(\n \"UploaderThread: couldn't open tourney %s: %s\" %\n (tourney_name, str(e)))\n traceback.print_tb(e.__traceback__)\n continue\n except Exception as e:\n self.write_log(\n 'Uploader thread threw exception: %s' % str(e))\n traceback.print_tb(e.__traceback__)\n continue\n time.sleep(1)\n\n\nclass UploaderServiceHandler(BaseRequestHandler):\n\n def get_fields_from_req(self, req, field_names):\n field_values = []\n for name in field_names:\n value = req.get(name, None)\n if value is None:\n raise FieldNotFoundException()\n field_values.append(value)\n return tuple(field_values)\n\n def process_request(self, req):\n global uploader_thread\n req_type = req.get('type', None)\n if not req_type:\n return make_error_response('Request has no request type')\n req_body = req.get('request', None)\n if req_body is None:\n return make_error_response('Request has no body')\n try:\n if req_type == 'start_uploading':\n tourney, username, password, private = (self.\n get_fields_from_req(req_body, ['tourney', 'username',\n 'password', 'private']))\n uploader_thread.add_tourney_to_upload_list(tourney,\n username, password, private)\n rep = make_ok_response()\n elif req_type == 'stop_uploading':\n tourney, = self.get_fields_from_req(req_body, ['tourney'])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = make_ok_response()\n elif req_type == 'delete':\n tourney, username, password = self.get_fields_from_req(req_body\n , ['tourney', 'username', 'password'])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = delete_tourney_from_web(tourney, username, password)\n uploader_thread.set_tourney_auth(tourney, username, password)\n elif req_type == 'status':\n tourney, = self.get_fields_from_req(req_body, ['tourney'])\n rep = {'success': True}\n auth = uploader_thread.get_tourney_auth(tourney)\n rep['publishing'] = uploader_thread.is_uploading_tourney(\n tourney)\n rep['viewers'] = uploader_thread.get_num_viewers(tourney)\n if auth:\n rep['username'] = auth.get('username', None)\n rep['password'] = auth.get('password', None)\n rep['private'] = auth.get('private', False)\n rep['last_successful_upload_time'\n ] = uploader_thread.get_last_successful_upload_time(tourney\n )\n rep['last_failed_upload'\n ] = uploader_thread.get_last_failed_upload(tourney)\n rep['upload_button_pressed_time'\n ] = uploader_thread.get_upload_button_pressed_time(tourney)\n rep['now'] = int(time.time())\n else:\n rep = make_error_response('Unrecognised request type')\n except FieldNotFoundException:\n return make_error_response('Request is not valid for type')\n return rep\n\n def handle(self):\n line = read_line_from_socket(self.request)\n if line is not None:\n rep = None\n try:\n req = json.loads(line)\n except Exception as e:\n rep = make_error_response('Request is not valid JSON')\n if not rep:\n rep = self.process_request(req)\n self.request.sendall((json.dumps(rep) + '\\n').encode('utf-8'))\n self.request.close()\n\n\nclass 
ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n\n def __init__(self, addr_port, service_handler):\n self.allow_reuse_address = True\n super().__init__(addr_port, service_handler)\n\n\nclass TourneyUploaderService(object):\n\n def __init__(self, listen_port):\n global uploader_thread\n self.listen_port = listen_port\n self.socket_server = ThreadedTCPServer(('127.0.0.1', listen_port),\n UploaderServiceHandler)\n self.server_thread = threading.Thread(target=self.socket_server.\n serve_forever)\n if not uploader_thread:\n uploader_thread = UploaderThread()\n self.server_thread.daemon = True\n self.server_thread.start()\n\n def shutdown(self):\n self.socket_server.shutdown()\n",
"step-2": "<mask token>\n\n\nclass UploaderThread(object):\n <mask token>\n\n def is_uploading_tourney(self, tourney):\n return tourney in self.uploading_tourneys\n\n def add_tourney_to_upload_list(self, tourney, username, password, private):\n self.uploading_tourneys.add(tourney)\n self.tourney_auth[tourney] = {'username': username, 'password':\n password, 'private': private}\n self.tourney_upload_start_time[tourney] = int(time.time())\n if tourney in self.tourney_last_uploaded_game_state:\n del self.tourney_last_uploaded_game_state[tourney]\n self.tourney_last_upload_attempt_time[tourney] = 0\n\n def remove_tourney_from_upload_list(self, tourney):\n self.uploading_tourneys.discard(tourney)\n\n def get_last_successful_upload_time(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n upload_time = tourney.get_last_successful_upload_time()\n if (upload_time is None or upload_time < self.\n tourney_upload_start_time.get(tourney_name, 0)):\n return None\n else:\n return upload_time\n except countdowntourney.TourneyException as e:\n sys.stderr.write(\n 'Failed to get last successful upload time: %s\\n' % str(e))\n return None\n\n def get_last_failed_upload(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n failed_upload = tourney.get_last_failed_upload()\n if failed_upload is not None and failed_upload.get('ts', None\n ) is not None and failed_upload['ts'\n ] >= self.tourney_upload_start_time.get(tourney_name, 0):\n return failed_upload\n else:\n return None\n except countdowntourney.TourneyException as e:\n sys.stderr.write('Failed to get last failed upload info: %s\\n' %\n str(e))\n return None\n\n def get_num_viewers(self, tourney_name):\n return self.tourney_num_viewers.get(tourney_name, None)\n\n def get_tourney_auth(self, tourney):\n return self.tourney_auth.get(tourney)\n\n def set_tourney_auth(self, tourney, username, password):\n self.tourney_auth[tourney] = {'username': username, 'password':\n password}\n\n def get_upload_button_pressed_time(self, tourney):\n if tourney not in self.uploading_tourneys:\n return None\n else:\n return self.tourney_upload_start_time.get(tourney, None)\n\n def write_log(self, message):\n sys.stderr.write('%s: %s\\r\\n' % (time.strftime('%Y-%m-%d %H:%M:%S'),\n message))\n\n def body(self):\n while True:\n uploading_tourneys = self.uploading_tourneys.copy()\n for tourney_name in uploading_tourneys:\n now = time.time()\n last_upload_time = self.tourney_last_upload_attempt_time.get(\n tourney_name, 0)\n if now >= last_upload_time + upload_interval_sec:\n try:\n self.tourney_last_upload_attempt_time[tourney_name\n ] = now\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n game_state = get_game_state(tourney)\n tourney_unique_id = get_tourney_unique_id(tourney)\n auth = self.tourney_auth.get(tourney_name, None)\n if auth:\n username = auth.get('username')\n password = auth.get('password')\n private = auth.get('private', False)\n else:\n username = None\n password = None\n private = False\n req = {'username': username, 'password':\n password, 'private': private, 'unique_id':\n tourney_unique_id, 'tourney': tourney_name}\n if (tourney_name not in self.\n tourney_last_uploaded_game_state or \n game_state != self.\n tourney_last_uploaded_game_state[tourney_name]\n ):\n req['state'] = game_state\n rep = make_https_json_request(http_server_host,\n http_server_port, http_submit_path, req)\n num_viewers = None\n if rep.get('success', False):\n 
self.tourney_last_uploaded_game_state[\n tourney_name] = game_state\n tourney.log_successful_upload()\n if 'state' in req:\n self.write_log(\n 'Successfully uploaded state for tourney \"%s\"'\n % tourney_name)\n else:\n self.write_log(\n 'No change since last upload of tourney \"%s\"'\n % tourney_name)\n num_viewers = rep.get('viewers', None)\n if num_viewers is not None:\n self.write_log(\n 'Server reports %d viewer%s.' % (\n num_viewers, 's' if num_viewers != \n 1 else ''))\n else:\n if rep.get('http_failure', False):\n failure_type = (countdowntourney.\n UPLOAD_FAIL_TYPE_HTTP)\n else:\n failure_type = (countdowntourney.\n UPLOAD_FAIL_TYPE_REJECTED)\n tourney.log_failed_upload(failure_type, rep\n .get('message', '(no message)'))\n self.write_log(\n 'Failed to upload state for tourney \"%s\": %s'\n % (tourney_name, rep.get('message',\n '(no message')))\n self.tourney_num_viewers[tourney_name\n ] = num_viewers\n except countdowntourney.TourneyException as e:\n self.write_log(\n \"UploaderThread: couldn't open tourney %s: %s\" %\n (tourney_name, str(e)))\n traceback.print_tb(e.__traceback__)\n continue\n except Exception as e:\n self.write_log(\n 'Uploader thread threw exception: %s' % str(e))\n traceback.print_tb(e.__traceback__)\n continue\n time.sleep(1)\n\n\nclass UploaderServiceHandler(BaseRequestHandler):\n\n def get_fields_from_req(self, req, field_names):\n field_values = []\n for name in field_names:\n value = req.get(name, None)\n if value is None:\n raise FieldNotFoundException()\n field_values.append(value)\n return tuple(field_values)\n\n def process_request(self, req):\n global uploader_thread\n req_type = req.get('type', None)\n if not req_type:\n return make_error_response('Request has no request type')\n req_body = req.get('request', None)\n if req_body is None:\n return make_error_response('Request has no body')\n try:\n if req_type == 'start_uploading':\n tourney, username, password, private = (self.\n get_fields_from_req(req_body, ['tourney', 'username',\n 'password', 'private']))\n uploader_thread.add_tourney_to_upload_list(tourney,\n username, password, private)\n rep = make_ok_response()\n elif req_type == 'stop_uploading':\n tourney, = self.get_fields_from_req(req_body, ['tourney'])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = make_ok_response()\n elif req_type == 'delete':\n tourney, username, password = self.get_fields_from_req(req_body\n , ['tourney', 'username', 'password'])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = delete_tourney_from_web(tourney, username, password)\n uploader_thread.set_tourney_auth(tourney, username, password)\n elif req_type == 'status':\n tourney, = self.get_fields_from_req(req_body, ['tourney'])\n rep = {'success': True}\n auth = uploader_thread.get_tourney_auth(tourney)\n rep['publishing'] = uploader_thread.is_uploading_tourney(\n tourney)\n rep['viewers'] = uploader_thread.get_num_viewers(tourney)\n if auth:\n rep['username'] = auth.get('username', None)\n rep['password'] = auth.get('password', None)\n rep['private'] = auth.get('private', False)\n rep['last_successful_upload_time'\n ] = uploader_thread.get_last_successful_upload_time(tourney\n )\n rep['last_failed_upload'\n ] = uploader_thread.get_last_failed_upload(tourney)\n rep['upload_button_pressed_time'\n ] = uploader_thread.get_upload_button_pressed_time(tourney)\n rep['now'] = int(time.time())\n else:\n rep = make_error_response('Unrecognised request type')\n except FieldNotFoundException:\n return make_error_response('Request is not 
valid for type')\n return rep\n\n def handle(self):\n line = read_line_from_socket(self.request)\n if line is not None:\n rep = None\n try:\n req = json.loads(line)\n except Exception as e:\n rep = make_error_response('Request is not valid JSON')\n if not rep:\n rep = self.process_request(req)\n self.request.sendall((json.dumps(rep) + '\\n').encode('utf-8'))\n self.request.close()\n\n\nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n\n def __init__(self, addr_port, service_handler):\n self.allow_reuse_address = True\n super().__init__(addr_port, service_handler)\n\n\nclass TourneyUploaderService(object):\n\n def __init__(self, listen_port):\n global uploader_thread\n self.listen_port = listen_port\n self.socket_server = ThreadedTCPServer(('127.0.0.1', listen_port),\n UploaderServiceHandler)\n self.server_thread = threading.Thread(target=self.socket_server.\n serve_forever)\n if not uploader_thread:\n uploader_thread = UploaderThread()\n self.server_thread.daemon = True\n self.server_thread.start()\n\n def shutdown(self):\n self.socket_server.shutdown()\n",
"step-3": "<mask token>\n\n\nclass FieldNotFoundException(Exception):\n pass\n\n\ndef make_error_response(message):\n return {'success': False, 'message': message}\n\n\n<mask token>\n\n\ndef get_tourney_unique_id(tourney):\n return tourney.get_unique_id()\n\n\ndef delete_tourney_from_web(tourney_name, username, password):\n req = {'username': username, 'password': password, 'tourney':\n tourney_name, 'delete': True}\n return make_https_json_request(http_server_host, http_server_port,\n http_delete_path, req)\n\n\n<mask token>\n\n\nclass UploaderThread(object):\n\n def __init__(self):\n self.uploading_tourneys = set()\n self.tourney_upload_start_time = {}\n self.tourney_last_upload_attempt_time = {}\n self.tourney_last_uploaded_game_state = {}\n self.tourney_num_viewers = {}\n self.tourney_auth = {}\n self.thread = threading.Thread(target=self.body)\n self.thread.daemon = True\n self.thread.start()\n\n def is_uploading_tourney(self, tourney):\n return tourney in self.uploading_tourneys\n\n def add_tourney_to_upload_list(self, tourney, username, password, private):\n self.uploading_tourneys.add(tourney)\n self.tourney_auth[tourney] = {'username': username, 'password':\n password, 'private': private}\n self.tourney_upload_start_time[tourney] = int(time.time())\n if tourney in self.tourney_last_uploaded_game_state:\n del self.tourney_last_uploaded_game_state[tourney]\n self.tourney_last_upload_attempt_time[tourney] = 0\n\n def remove_tourney_from_upload_list(self, tourney):\n self.uploading_tourneys.discard(tourney)\n\n def get_last_successful_upload_time(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n upload_time = tourney.get_last_successful_upload_time()\n if (upload_time is None or upload_time < self.\n tourney_upload_start_time.get(tourney_name, 0)):\n return None\n else:\n return upload_time\n except countdowntourney.TourneyException as e:\n sys.stderr.write(\n 'Failed to get last successful upload time: %s\\n' % str(e))\n return None\n\n def get_last_failed_upload(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n failed_upload = tourney.get_last_failed_upload()\n if failed_upload is not None and failed_upload.get('ts', None\n ) is not None and failed_upload['ts'\n ] >= self.tourney_upload_start_time.get(tourney_name, 0):\n return failed_upload\n else:\n return None\n except countdowntourney.TourneyException as e:\n sys.stderr.write('Failed to get last failed upload info: %s\\n' %\n str(e))\n return None\n\n def get_num_viewers(self, tourney_name):\n return self.tourney_num_viewers.get(tourney_name, None)\n\n def get_tourney_auth(self, tourney):\n return self.tourney_auth.get(tourney)\n\n def set_tourney_auth(self, tourney, username, password):\n self.tourney_auth[tourney] = {'username': username, 'password':\n password}\n\n def get_upload_button_pressed_time(self, tourney):\n if tourney not in self.uploading_tourneys:\n return None\n else:\n return self.tourney_upload_start_time.get(tourney, None)\n\n def write_log(self, message):\n sys.stderr.write('%s: %s\\r\\n' % (time.strftime('%Y-%m-%d %H:%M:%S'),\n message))\n\n def body(self):\n while True:\n uploading_tourneys = self.uploading_tourneys.copy()\n for tourney_name in uploading_tourneys:\n now = time.time()\n last_upload_time = self.tourney_last_upload_attempt_time.get(\n tourney_name, 0)\n if now >= last_upload_time + upload_interval_sec:\n try:\n self.tourney_last_upload_attempt_time[tourney_name\n ] = now\n with 
countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n game_state = get_game_state(tourney)\n tourney_unique_id = get_tourney_unique_id(tourney)\n auth = self.tourney_auth.get(tourney_name, None)\n if auth:\n username = auth.get('username')\n password = auth.get('password')\n private = auth.get('private', False)\n else:\n username = None\n password = None\n private = False\n req = {'username': username, 'password':\n password, 'private': private, 'unique_id':\n tourney_unique_id, 'tourney': tourney_name}\n if (tourney_name not in self.\n tourney_last_uploaded_game_state or \n game_state != self.\n tourney_last_uploaded_game_state[tourney_name]\n ):\n req['state'] = game_state\n rep = make_https_json_request(http_server_host,\n http_server_port, http_submit_path, req)\n num_viewers = None\n if rep.get('success', False):\n self.tourney_last_uploaded_game_state[\n tourney_name] = game_state\n tourney.log_successful_upload()\n if 'state' in req:\n self.write_log(\n 'Successfully uploaded state for tourney \"%s\"'\n % tourney_name)\n else:\n self.write_log(\n 'No change since last upload of tourney \"%s\"'\n % tourney_name)\n num_viewers = rep.get('viewers', None)\n if num_viewers is not None:\n self.write_log(\n 'Server reports %d viewer%s.' % (\n num_viewers, 's' if num_viewers != \n 1 else ''))\n else:\n if rep.get('http_failure', False):\n failure_type = (countdowntourney.\n UPLOAD_FAIL_TYPE_HTTP)\n else:\n failure_type = (countdowntourney.\n UPLOAD_FAIL_TYPE_REJECTED)\n tourney.log_failed_upload(failure_type, rep\n .get('message', '(no message)'))\n self.write_log(\n 'Failed to upload state for tourney \"%s\": %s'\n % (tourney_name, rep.get('message',\n '(no message')))\n self.tourney_num_viewers[tourney_name\n ] = num_viewers\n except countdowntourney.TourneyException as e:\n self.write_log(\n \"UploaderThread: couldn't open tourney %s: %s\" %\n (tourney_name, str(e)))\n traceback.print_tb(e.__traceback__)\n continue\n except Exception as e:\n self.write_log(\n 'Uploader thread threw exception: %s' % str(e))\n traceback.print_tb(e.__traceback__)\n continue\n time.sleep(1)\n\n\nclass UploaderServiceHandler(BaseRequestHandler):\n\n def get_fields_from_req(self, req, field_names):\n field_values = []\n for name in field_names:\n value = req.get(name, None)\n if value is None:\n raise FieldNotFoundException()\n field_values.append(value)\n return tuple(field_values)\n\n def process_request(self, req):\n global uploader_thread\n req_type = req.get('type', None)\n if not req_type:\n return make_error_response('Request has no request type')\n req_body = req.get('request', None)\n if req_body is None:\n return make_error_response('Request has no body')\n try:\n if req_type == 'start_uploading':\n tourney, username, password, private = (self.\n get_fields_from_req(req_body, ['tourney', 'username',\n 'password', 'private']))\n uploader_thread.add_tourney_to_upload_list(tourney,\n username, password, private)\n rep = make_ok_response()\n elif req_type == 'stop_uploading':\n tourney, = self.get_fields_from_req(req_body, ['tourney'])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = make_ok_response()\n elif req_type == 'delete':\n tourney, username, password = self.get_fields_from_req(req_body\n , ['tourney', 'username', 'password'])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = delete_tourney_from_web(tourney, username, password)\n uploader_thread.set_tourney_auth(tourney, username, password)\n elif req_type == 'status':\n tourney, = 
self.get_fields_from_req(req_body, ['tourney'])\n rep = {'success': True}\n auth = uploader_thread.get_tourney_auth(tourney)\n rep['publishing'] = uploader_thread.is_uploading_tourney(\n tourney)\n rep['viewers'] = uploader_thread.get_num_viewers(tourney)\n if auth:\n rep['username'] = auth.get('username', None)\n rep['password'] = auth.get('password', None)\n rep['private'] = auth.get('private', False)\n rep['last_successful_upload_time'\n ] = uploader_thread.get_last_successful_upload_time(tourney\n )\n rep['last_failed_upload'\n ] = uploader_thread.get_last_failed_upload(tourney)\n rep['upload_button_pressed_time'\n ] = uploader_thread.get_upload_button_pressed_time(tourney)\n rep['now'] = int(time.time())\n else:\n rep = make_error_response('Unrecognised request type')\n except FieldNotFoundException:\n return make_error_response('Request is not valid for type')\n return rep\n\n def handle(self):\n line = read_line_from_socket(self.request)\n if line is not None:\n rep = None\n try:\n req = json.loads(line)\n except Exception as e:\n rep = make_error_response('Request is not valid JSON')\n if not rep:\n rep = self.process_request(req)\n self.request.sendall((json.dumps(rep) + '\\n').encode('utf-8'))\n self.request.close()\n\n\nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n\n def __init__(self, addr_port, service_handler):\n self.allow_reuse_address = True\n super().__init__(addr_port, service_handler)\n\n\nclass TourneyUploaderService(object):\n\n def __init__(self, listen_port):\n global uploader_thread\n self.listen_port = listen_port\n self.socket_server = ThreadedTCPServer(('127.0.0.1', listen_port),\n UploaderServiceHandler)\n self.server_thread = threading.Thread(target=self.socket_server.\n serve_forever)\n if not uploader_thread:\n uploader_thread = UploaderThread()\n self.server_thread.daemon = True\n self.server_thread.start()\n\n def shutdown(self):\n self.socket_server.shutdown()\n",
"step-4": "<mask token>\nif not db_dir:\n db_dir = os.path.join(os.getcwd(), 'tourneys')\n<mask token>\n\n\nclass FieldNotFoundException(Exception):\n pass\n\n\ndef make_error_response(message):\n return {'success': False, 'message': message}\n\n\ndef make_ok_response():\n return {'success': True}\n\n\ndef get_game_state(tourney):\n return tourney2json.get_state_for_upload(tourney)\n\n\ndef get_tourney_unique_id(tourney):\n return tourney.get_unique_id()\n\n\ndef delete_tourney_from_web(tourney_name, username, password):\n req = {'username': username, 'password': password, 'tourney':\n tourney_name, 'delete': True}\n return make_https_json_request(http_server_host, http_server_port,\n http_delete_path, req)\n\n\ndef read_line_from_socket(sock):\n byte_array = b''\n b = 0\n while b != b'\\n':\n b = sock.recv(1)\n if b is None or len(b) == 0:\n return None\n byte_array += b\n return byte_array.decode('utf-8')\n\n\ndef make_https_json_request(server_host, server_port, path, request):\n post_data = json.dumps(request)\n httpcon = None\n try:\n httpcon = http.client.HTTPSConnection(host=server_host, port=\n server_port, timeout=30)\n httpcon.connect()\n except Exception as e:\n if httpcon:\n httpcon.close()\n sys.stderr.write('Failed to connect to %s: %s\\r\\n' % (server_host,\n str(e)))\n return {'success': False, 'http_failure': True, 'message': \n 'Failed to connect to %s: %s. Check your internet connection.' %\n (server_host, str(e))}\n try:\n while path and path[0] == '/':\n path = path[1:]\n url = 'https://%s%s/%s' % (server_host, ':' + str(server_port) if\n server_port else '', path)\n httpcon.request('POST', url, post_data)\n except ConnectionError as e:\n httpcon.close()\n sys.stderr.write('Failed to send HTTP request to %s: %s\\r\\n' % (url,\n str(e)))\n return {'success': False, 'http_failure': True, 'message': \n 'Failed to upload game state to server %s: %s. Check your internet connection.'\n % (url, str(e))}\n except Exception as e:\n httpcon.close()\n sys.stderr.write('Failed to send HTTP request to %s: %s\\r\\n' % (url,\n str(e)))\n return {'success': False, 'http_failure': True, 'message': str(e)}\n try:\n response = httpcon.getresponse()\n except Exception as e:\n sys.stderr.write('Failed to read response from %s: %s\\r\\n' % (url,\n str(e)))\n httpcon.close()\n return {'success': False, 'http_failure': True, 'message': str(e)}\n if response.status != 200:\n sys.stderr.write(\n 'Failed to post data to %s: HTTP response %d: %s\\r\\n' % (url,\n response.status, response.reason))\n rep = {'success': False, 'http_failure': True, 'message': \n 'Failed to post update to server: HTTP %d: %s' % (response.\n status, response.reason)}\n else:\n response_body = None\n rep = None\n try:\n response_body = response.read()\n except Exception as e:\n sys.stderr.write('Failed to read response data from HTTP: ' +\n str(e) + '\\r\\n')\n rep = {'success': False, 'http_failure': True, 'message': str(e)}\n if response_body is not None:\n try:\n rep = json.loads(response_body.decode('utf-8'))\n if not rep.get('success', False):\n message = rep.get('message', '(none)')\n sys.stderr.write('Update failed. 
Message: ' + message +\n '\\r\\n')\n except Exception as e:\n sys.stderr.write('Failed to parse server response: ' + str(\n e) + '\\r\\n')\n rep = {'success': False, 'message': \n 'Server response was invalid JSON: ' + str(e)}\n httpcon.close()\n return rep\n\n\nclass UploaderThread(object):\n\n def __init__(self):\n self.uploading_tourneys = set()\n self.tourney_upload_start_time = {}\n self.tourney_last_upload_attempt_time = {}\n self.tourney_last_uploaded_game_state = {}\n self.tourney_num_viewers = {}\n self.tourney_auth = {}\n self.thread = threading.Thread(target=self.body)\n self.thread.daemon = True\n self.thread.start()\n\n def is_uploading_tourney(self, tourney):\n return tourney in self.uploading_tourneys\n\n def add_tourney_to_upload_list(self, tourney, username, password, private):\n self.uploading_tourneys.add(tourney)\n self.tourney_auth[tourney] = {'username': username, 'password':\n password, 'private': private}\n self.tourney_upload_start_time[tourney] = int(time.time())\n if tourney in self.tourney_last_uploaded_game_state:\n del self.tourney_last_uploaded_game_state[tourney]\n self.tourney_last_upload_attempt_time[tourney] = 0\n\n def remove_tourney_from_upload_list(self, tourney):\n self.uploading_tourneys.discard(tourney)\n\n def get_last_successful_upload_time(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n upload_time = tourney.get_last_successful_upload_time()\n if (upload_time is None or upload_time < self.\n tourney_upload_start_time.get(tourney_name, 0)):\n return None\n else:\n return upload_time\n except countdowntourney.TourneyException as e:\n sys.stderr.write(\n 'Failed to get last successful upload time: %s\\n' % str(e))\n return None\n\n def get_last_failed_upload(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n failed_upload = tourney.get_last_failed_upload()\n if failed_upload is not None and failed_upload.get('ts', None\n ) is not None and failed_upload['ts'\n ] >= self.tourney_upload_start_time.get(tourney_name, 0):\n return failed_upload\n else:\n return None\n except countdowntourney.TourneyException as e:\n sys.stderr.write('Failed to get last failed upload info: %s\\n' %\n str(e))\n return None\n\n def get_num_viewers(self, tourney_name):\n return self.tourney_num_viewers.get(tourney_name, None)\n\n def get_tourney_auth(self, tourney):\n return self.tourney_auth.get(tourney)\n\n def set_tourney_auth(self, tourney, username, password):\n self.tourney_auth[tourney] = {'username': username, 'password':\n password}\n\n def get_upload_button_pressed_time(self, tourney):\n if tourney not in self.uploading_tourneys:\n return None\n else:\n return self.tourney_upload_start_time.get(tourney, None)\n\n def write_log(self, message):\n sys.stderr.write('%s: %s\\r\\n' % (time.strftime('%Y-%m-%d %H:%M:%S'),\n message))\n\n def body(self):\n while True:\n uploading_tourneys = self.uploading_tourneys.copy()\n for tourney_name in uploading_tourneys:\n now = time.time()\n last_upload_time = self.tourney_last_upload_attempt_time.get(\n tourney_name, 0)\n if now >= last_upload_time + upload_interval_sec:\n try:\n self.tourney_last_upload_attempt_time[tourney_name\n ] = now\n with countdowntourney.tourney_open(tourney_name, db_dir\n ) as tourney:\n game_state = get_game_state(tourney)\n tourney_unique_id = get_tourney_unique_id(tourney)\n auth = self.tourney_auth.get(tourney_name, None)\n if auth:\n username = auth.get('username')\n password = 
auth.get('password')\n private = auth.get('private', False)\n else:\n username = None\n password = None\n private = False\n req = {'username': username, 'password':\n password, 'private': private, 'unique_id':\n tourney_unique_id, 'tourney': tourney_name}\n if (tourney_name not in self.\n tourney_last_uploaded_game_state or \n game_state != self.\n tourney_last_uploaded_game_state[tourney_name]\n ):\n req['state'] = game_state\n rep = make_https_json_request(http_server_host,\n http_server_port, http_submit_path, req)\n num_viewers = None\n if rep.get('success', False):\n self.tourney_last_uploaded_game_state[\n tourney_name] = game_state\n tourney.log_successful_upload()\n if 'state' in req:\n self.write_log(\n 'Successfully uploaded state for tourney \"%s\"'\n % tourney_name)\n else:\n self.write_log(\n 'No change since last upload of tourney \"%s\"'\n % tourney_name)\n num_viewers = rep.get('viewers', None)\n if num_viewers is not None:\n self.write_log(\n 'Server reports %d viewer%s.' % (\n num_viewers, 's' if num_viewers != \n 1 else ''))\n else:\n if rep.get('http_failure', False):\n failure_type = (countdowntourney.\n UPLOAD_FAIL_TYPE_HTTP)\n else:\n failure_type = (countdowntourney.\n UPLOAD_FAIL_TYPE_REJECTED)\n tourney.log_failed_upload(failure_type, rep\n .get('message', '(no message)'))\n self.write_log(\n 'Failed to upload state for tourney \"%s\": %s'\n % (tourney_name, rep.get('message',\n '(no message')))\n self.tourney_num_viewers[tourney_name\n ] = num_viewers\n except countdowntourney.TourneyException as e:\n self.write_log(\n \"UploaderThread: couldn't open tourney %s: %s\" %\n (tourney_name, str(e)))\n traceback.print_tb(e.__traceback__)\n continue\n except Exception as e:\n self.write_log(\n 'Uploader thread threw exception: %s' % str(e))\n traceback.print_tb(e.__traceback__)\n continue\n time.sleep(1)\n\n\nclass UploaderServiceHandler(BaseRequestHandler):\n\n def get_fields_from_req(self, req, field_names):\n field_values = []\n for name in field_names:\n value = req.get(name, None)\n if value is None:\n raise FieldNotFoundException()\n field_values.append(value)\n return tuple(field_values)\n\n def process_request(self, req):\n global uploader_thread\n req_type = req.get('type', None)\n if not req_type:\n return make_error_response('Request has no request type')\n req_body = req.get('request', None)\n if req_body is None:\n return make_error_response('Request has no body')\n try:\n if req_type == 'start_uploading':\n tourney, username, password, private = (self.\n get_fields_from_req(req_body, ['tourney', 'username',\n 'password', 'private']))\n uploader_thread.add_tourney_to_upload_list(tourney,\n username, password, private)\n rep = make_ok_response()\n elif req_type == 'stop_uploading':\n tourney, = self.get_fields_from_req(req_body, ['tourney'])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = make_ok_response()\n elif req_type == 'delete':\n tourney, username, password = self.get_fields_from_req(req_body\n , ['tourney', 'username', 'password'])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = delete_tourney_from_web(tourney, username, password)\n uploader_thread.set_tourney_auth(tourney, username, password)\n elif req_type == 'status':\n tourney, = self.get_fields_from_req(req_body, ['tourney'])\n rep = {'success': True}\n auth = uploader_thread.get_tourney_auth(tourney)\n rep['publishing'] = uploader_thread.is_uploading_tourney(\n tourney)\n rep['viewers'] = uploader_thread.get_num_viewers(tourney)\n if auth:\n 
rep['username'] = auth.get('username', None)\n rep['password'] = auth.get('password', None)\n rep['private'] = auth.get('private', False)\n rep['last_successful_upload_time'\n ] = uploader_thread.get_last_successful_upload_time(tourney\n )\n rep['last_failed_upload'\n ] = uploader_thread.get_last_failed_upload(tourney)\n rep['upload_button_pressed_time'\n ] = uploader_thread.get_upload_button_pressed_time(tourney)\n rep['now'] = int(time.time())\n else:\n rep = make_error_response('Unrecognised request type')\n except FieldNotFoundException:\n return make_error_response('Request is not valid for type')\n return rep\n\n def handle(self):\n line = read_line_from_socket(self.request)\n if line is not None:\n rep = None\n try:\n req = json.loads(line)\n except Exception as e:\n rep = make_error_response('Request is not valid JSON')\n if not rep:\n rep = self.process_request(req)\n self.request.sendall((json.dumps(rep) + '\\n').encode('utf-8'))\n self.request.close()\n\n\nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n\n def __init__(self, addr_port, service_handler):\n self.allow_reuse_address = True\n super().__init__(addr_port, service_handler)\n\n\nclass TourneyUploaderService(object):\n\n def __init__(self, listen_port):\n global uploader_thread\n self.listen_port = listen_port\n self.socket_server = ThreadedTCPServer(('127.0.0.1', listen_port),\n UploaderServiceHandler)\n self.server_thread = threading.Thread(target=self.socket_server.\n serve_forever)\n if not uploader_thread:\n uploader_thread = UploaderThread()\n self.server_thread.daemon = True\n self.server_thread.start()\n\n def shutdown(self):\n self.socket_server.shutdown()\n",
"step-5": "#!/usr/bin/python3\n\n# The uploader service listens for connections from localhost on port 3961.\n# It expects a JSON object on a line by itself as the request. It responds\n# with another JSON object on a line by itself, then closes the connection.\n# Atropine CGI scripts can send requests to this service to tell it to:\n# * Add a tourney to the list of tourneys we're periodically uploading to\n# greem.co.uk\n# * Remove a tourney from that list (i.e. stop uploading it)\n# * Get the upload state of a tourney (are we uploading it, when was the\n# last successful upload, was the last upload successful, and if not what\n# went wrong)\n#\n# The service is started with atropine.py, and runs alongside the web server\n# which serves the web interface used by the tournament administrator. At\n# startup, no tourneys are being uploaded.\n\nimport sys\nimport os\nimport socketserver\nfrom socketserver import BaseRequestHandler\nimport json\nimport threading\nimport time\nimport http.client\nimport traceback\n\nhttp_server_host = \"greem.co.uk\"\nhttp_server_port = None\nhttp_submit_path = \"/cgi-bin/colive/submit.py\"\nhttp_delete_path = \"/cgi-bin/colive/submit.py\"\n\nupload_interval_sec = 10\n\ndb_dir = os.getenv(\"TOURNEYSPATH\")\nif not db_dir:\n db_dir = os.path.join(os.getcwd(), \"tourneys\")\n\nimport tourney2json\nimport countdowntourney\n\nuploader_thread = None\n\nclass FieldNotFoundException(Exception):\n pass\n\ndef make_error_response(message):\n return { \"success\" : False, \"message\" : message }\n\ndef make_ok_response():\n return { \"success\" : True }\n\ndef get_game_state(tourney):\n return tourney2json.get_state_for_upload(tourney)\n\ndef get_tourney_unique_id(tourney):\n return tourney.get_unique_id()\n\ndef delete_tourney_from_web(tourney_name, username, password):\n req = {\n \"username\" : username,\n \"password\" : password,\n \"tourney\" : tourney_name,\n \"delete\" : True\n }\n return make_https_json_request(http_server_host, http_server_port, http_delete_path, req)\n\ndef read_line_from_socket(sock):\n byte_array = b'';\n b = 0\n while b != b'\\n':\n b = sock.recv(1)\n if b is None or len(b) == 0:\n return None\n byte_array += b\n return byte_array.decode(\"utf-8\")\n\ndef make_https_json_request(server_host, server_port, path, request):\n post_data = json.dumps(request)\n httpcon = None\n try:\n httpcon = http.client.HTTPSConnection(host=server_host, port=server_port, timeout=30)\n httpcon.connect()\n except Exception as e:\n if httpcon:\n httpcon.close()\n sys.stderr.write(\"Failed to connect to %s: %s\\r\\n\" % (server_host, str(e)))\n return { \"success\" : False, \"http_failure\" : True, \"message\" : \"Failed to connect to %s: %s. Check your internet connection.\" % (server_host, str(e)) }\n\n try:\n while path and path[0] == '/':\n path = path[1:]\n url = \"https://%s%s/%s\" % (server_host, (\":\" + str(server_port)) if server_port else \"\", path)\n httpcon.request(\"POST\", url, post_data)\n except ConnectionError as e:\n httpcon.close()\n sys.stderr.write(\"Failed to send HTTP request to %s: %s\\r\\n\" % (url, str(e)))\n return {\n \"success\" : False,\n \"http_failure\" : True,\n \"message\" : \"Failed to upload game state to server %s: %s. 
Check your internet connection.\" % (url, str(e))\n }\n except Exception as e:\n httpcon.close()\n sys.stderr.write(\"Failed to send HTTP request to %s: %s\\r\\n\" % (url, str(e)))\n return { \"success\" : False, \"http_failure\" : True, \"message\" : str(e) }\n\n try:\n response = httpcon.getresponse()\n except Exception as e:\n sys.stderr.write(\"Failed to read response from %s: %s\\r\\n\" % (url, str(e)))\n httpcon.close()\n return { \"success\" : False, \"http_failure\" : True, \"message\" : str(e) }\n\n if response.status != 200:\n sys.stderr.write(\"Failed to post data to %s: HTTP response %d: %s\\r\\n\" % (url, response.status, response.reason))\n rep = {\n \"success\" : False,\n \"http_failure\" : True,\n \"message\" : \"Failed to post update to server: HTTP %d: %s\" % (response.status, response.reason)\n }\n else:\n response_body = None\n rep = None\n try:\n response_body = response.read()\n except Exception as e:\n sys.stderr.write(\"Failed to read response data from HTTP: \" + str(e) + \"\\r\\n\")\n rep = {\n \"success\" : False,\n \"http_failure\" : True,\n \"message\" : str(e)\n }\n if response_body is not None:\n try:\n rep = json.loads(response_body.decode(\"utf-8\"))\n if not rep.get(\"success\", False):\n message = rep.get(\"message\", \"(none)\")\n sys.stderr.write(\"Update failed. Message: \" + message + \"\\r\\n\")\n except Exception as e:\n sys.stderr.write(\"Failed to parse server response: \" + str(e) + \"\\r\\n\")\n rep = {\n \"success\" : False,\n \"message\" : \"Server response was invalid JSON: \" + str(e)\n }\n httpcon.close()\n return rep\n\n\nclass UploaderThread(object):\n def __init__(self):\n self.uploading_tourneys = set()\n self.tourney_upload_start_time = {}\n self.tourney_last_upload_attempt_time = {}\n self.tourney_last_uploaded_game_state = {}\n self.tourney_num_viewers = {}\n self.tourney_auth = {}\n self.thread = threading.Thread(target=self.body)\n self.thread.daemon = True\n self.thread.start()\n\n def is_uploading_tourney(self, tourney):\n return (tourney in self.uploading_tourneys)\n\n def add_tourney_to_upload_list(self, tourney, username, password, private):\n self.uploading_tourneys.add(tourney)\n self.tourney_auth[tourney] = { \"username\" : username, \"password\" : password, \"private\" : private }\n self.tourney_upload_start_time[tourney] = int(time.time());\n if tourney in self.tourney_last_uploaded_game_state:\n del self.tourney_last_uploaded_game_state[tourney]\n self.tourney_last_upload_attempt_time[tourney] = 0\n\n def remove_tourney_from_upload_list(self, tourney):\n self.uploading_tourneys.discard(tourney)\n\n def get_last_successful_upload_time(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir) as tourney:\n upload_time = tourney.get_last_successful_upload_time()\n\n # Don't return this time if it's before the user even pressed\n # the \"start uploading\" button\"\n if upload_time is None or upload_time < self.tourney_upload_start_time.get(tourney_name, 0):\n return None\n else:\n return upload_time\n except countdowntourney.TourneyException as e:\n sys.stderr.write(\"Failed to get last successful upload time: %s\\n\" % (str(e)))\n return None\n\n def get_last_failed_upload(self, tourney_name):\n try:\n with countdowntourney.tourney_open(tourney_name, db_dir) as tourney:\n failed_upload = tourney.get_last_failed_upload()\n if failed_upload is not None and failed_upload.get(\"ts\", None) is not None and failed_upload[\"ts\"] >= self.tourney_upload_start_time.get(tourney_name, 0):\n return 
failed_upload\n else:\n return None\n except countdowntourney.TourneyException as e:\n sys.stderr.write(\"Failed to get last failed upload info: %s\\n\" % (str(e)))\n return None\n\n def get_num_viewers(self, tourney_name):\n return self.tourney_num_viewers.get(tourney_name, None)\n\n def get_tourney_auth(self, tourney):\n return self.tourney_auth.get(tourney)\n\n def set_tourney_auth(self, tourney, username, password):\n self.tourney_auth[tourney] = { \"username\" : username, \"password\" : password }\n\n def get_upload_button_pressed_time(self, tourney):\n if tourney not in self.uploading_tourneys:\n return None\n else:\n return self.tourney_upload_start_time.get(tourney, None)\n\n def write_log(self, message):\n sys.stderr.write(\"%s: %s\\r\\n\" % (time.strftime(\"%Y-%m-%d %H:%M:%S\"), message))\n\n def body(self):\n while True:\n uploading_tourneys = self.uploading_tourneys.copy()\n for tourney_name in uploading_tourneys:\n now = time.time()\n last_upload_time = self.tourney_last_upload_attempt_time.get(tourney_name, 0)\n if now >= last_upload_time + upload_interval_sec:\n # Upload this tourney to the web if it's been at least\n # upload_interval_sec seconds since the previous upload\n # attempt.\n try:\n self.tourney_last_upload_attempt_time[tourney_name] = now\n with countdowntourney.tourney_open(tourney_name, db_dir) as tourney:\n game_state = get_game_state(tourney)\n tourney_unique_id = get_tourney_unique_id(tourney)\n auth = self.tourney_auth.get(tourney_name, None)\n if auth:\n username = auth.get(\"username\")\n password = auth.get(\"password\")\n private = auth.get(\"private\", False)\n else:\n username = None\n password = None\n private = False\n req = {\n \"username\" : username,\n \"password\" : password,\n \"private\" : private,\n \"unique_id\" : tourney_unique_id,\n \"tourney\" : tourney_name\n }\n\n # If the game state has changed since the last time\n # we did a successful upload, include the new game\n # state, otherwise we just submit a null update\n # which only checks the server still works and\n # reads how many current visitors there are.\n if tourney_name not in self.tourney_last_uploaded_game_state or game_state != self.tourney_last_uploaded_game_state[tourney_name]:\n req[\"state\"] = game_state\n\n # Send the submission to the server & get the reply\n rep = make_https_json_request(http_server_host, http_server_port, http_submit_path, req)\n num_viewers = None\n if rep.get(\"success\", False):\n self.tourney_last_uploaded_game_state[tourney_name] = game_state\n tourney.log_successful_upload()\n if \"state\" in req:\n self.write_log(\"Successfully uploaded state for tourney \\\"%s\\\"\" % (tourney_name))\n else:\n self.write_log(\"No change since last upload of tourney \\\"%s\\\"\" % (tourney_name))\n num_viewers = rep.get(\"viewers\", None)\n if num_viewers is not None:\n self.write_log(\"Server reports %d viewer%s.\" % (num_viewers, \"s\" if num_viewers != 1 else \"\"))\n else:\n if rep.get(\"http_failure\", False):\n failure_type = countdowntourney.UPLOAD_FAIL_TYPE_HTTP\n else:\n failure_type = countdowntourney.UPLOAD_FAIL_TYPE_REJECTED\n tourney.log_failed_upload(failure_type, rep.get(\"message\", \"(no message)\"))\n self.write_log(\"Failed to upload state for tourney \\\"%s\\\": %s\" % (tourney_name, rep.get(\"message\", \"(no message\")))\n self.tourney_num_viewers[tourney_name] = num_viewers\n except countdowntourney.TourneyException as e:\n self.write_log(\"UploaderThread: couldn't open tourney %s: %s\" % (tourney_name, str(e)))\n 
traceback.print_tb(e.__traceback__)\n continue\n except Exception as e:\n self.write_log(\"Uploader thread threw exception: %s\" % (str(e)))\n traceback.print_tb(e.__traceback__)\n continue\n time.sleep(1)\n\nclass UploaderServiceHandler(BaseRequestHandler):\n def get_fields_from_req(self, req, field_names):\n field_values = []\n for name in field_names:\n value = req.get(name, None)\n if value is None:\n raise FieldNotFoundException()\n field_values.append(value)\n return tuple(field_values)\n\n def process_request(self, req):\n global uploader_thread\n\n req_type = req.get(\"type\", None)\n if not req_type:\n return make_error_response(\"Request has no request type\")\n req_body = req.get(\"request\", None)\n if req_body is None:\n return make_error_response(\"Request has no body\")\n\n try:\n if req_type == \"start_uploading\":\n (tourney, username, password, private) = self.get_fields_from_req(req_body, [\"tourney\", \"username\", \"password\", \"private\"])\n uploader_thread.add_tourney_to_upload_list(tourney, username, password, private)\n rep = make_ok_response()\n elif req_type == \"stop_uploading\":\n (tourney,) = self.get_fields_from_req(req_body, [\"tourney\"])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = make_ok_response()\n elif req_type == \"delete\":\n (tourney, username, password) = self.get_fields_from_req(req_body, [\"tourney\", \"username\", \"password\"])\n uploader_thread.remove_tourney_from_upload_list(tourney)\n rep = delete_tourney_from_web(tourney, username, password)\n uploader_thread.set_tourney_auth(tourney, username, password)\n elif req_type == \"status\":\n (tourney,) = self.get_fields_from_req(req_body, [\"tourney\"])\n rep = { \"success\" : True }\n auth = uploader_thread.get_tourney_auth(tourney)\n rep[\"publishing\"] = uploader_thread.is_uploading_tourney(tourney)\n rep[\"viewers\"] = uploader_thread.get_num_viewers(tourney)\n if auth:\n rep[\"username\"] = auth.get(\"username\", None)\n rep[\"password\"] = auth.get(\"password\", None)\n rep[\"private\"] = auth.get(\"private\", False)\n rep[\"last_successful_upload_time\"] = uploader_thread.get_last_successful_upload_time(tourney)\n rep[\"last_failed_upload\"] = uploader_thread.get_last_failed_upload(tourney)\n rep[\"upload_button_pressed_time\"] = uploader_thread.get_upload_button_pressed_time(tourney)\n rep[\"now\"] = int(time.time())\n else:\n rep = make_error_response(\"Unrecognised request type\")\n except FieldNotFoundException:\n return make_error_response(\"Request is not valid for type\")\n\n return rep\n\n def handle(self):\n # Request is expected to be a JSON object, on a line by itself\n line = read_line_from_socket(self.request)\n if line is not None:\n rep = None\n try:\n req = json.loads(line)\n except Exception as e:\n rep = make_error_response(\"Request is not valid JSON\")\n\n if not rep:\n rep = self.process_request(req)\n self.request.sendall((json.dumps(rep) + \"\\n\").encode(\"utf-8\"))\n\n self.request.close()\n\nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n def __init__(self, addr_port, service_handler):\n self.allow_reuse_address = True\n super().__init__(addr_port, service_handler)\n\nclass TourneyUploaderService(object):\n def __init__(self, listen_port):\n global uploader_thread\n self.listen_port = listen_port\n self.socket_server = ThreadedTCPServer((\"127.0.0.1\", listen_port), UploaderServiceHandler)\n self.server_thread = threading.Thread(target=self.socket_server.serve_forever)\n\n if not uploader_thread:\n 
uploader_thread = UploaderThread()\n self.server_thread.daemon = True\n self.server_thread.start()\n\n def shutdown(self):\n self.socket_server.shutdown()\n",
"step-ids": [
19,
21,
26,
31,
34
]
}
|
[
19,
21,
26,
31,
34
] |
#!/usr/bin/env python
#-*-coding:utf-8-*-
#author:wuya
import os
import xlrd
import json
class Helper(object):
    '''Shared helper methods'''
def base_dir(self,filePath,folder='data'):
'''
        Return the common base path for a data file
        :parameter folder: folder name
        :parameter filePath: file name
'''
return os.path.join(
os.path.dirname(
os.path.dirname(__file__)),
folder,filePath)
def readExcel(self,rowx,filePath='data.xlsx'):
'''
        Read one row of data from the excel file and return it
        :parameter filePath: xlsx file name
        :parameter rowx: row index in the excel sheet
'''
book=xlrd.open_workbook(self.base_dir(filePath))
sheet=book.sheet_by_index(0)
return sheet.row_values(rowx)
def getUrl(self,rowx):
'''
        Get the request URL
        :parameter rowx: row index in the excel sheet
'''
return self.readExcel(rowx)[1]
def getData(self,rowx):
'''
        Get the request data (parsed as JSON) and return it
        :parameter rowx: row index in the excel sheet
'''
return json.loads(self.readExcel(rowx)[2])
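
# A minimal usage sketch (assumes the default data.xlsx whose first sheet
# keeps a URL in column 1 and a JSON payload in column 2; the row index
# below is hypothetical):
if __name__ == '__main__':
    helper = Helper()
    url = helper.getUrl(1)       # URL from column 1 of row 1
    payload = helper.getData(1)  # dict parsed from the JSON in column 2
    print(url, payload)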
|
normal
|
{
"blob_id": "7c2349810fc757848eeb5bddef4640d87d5f9ab9",
"index": 2439,
"step-1": "<mask token>\n\n\nclass Helper(object):\n <mask token>\n\n def base_dir(self, filePath, folder='data'):\n \"\"\"\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n \"\"\"\n return os.path.join(os.path.dirname(os.path.dirname(__file__)),\n folder, filePath)\n <mask token>\n\n def getUrl(self, rowx):\n \"\"\"\n 获取请求地址\n :parameter rowx:在excel中的行数\n \"\"\"\n return self.readExcel(rowx)[1]\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Helper(object):\n <mask token>\n\n def base_dir(self, filePath, folder='data'):\n \"\"\"\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n \"\"\"\n return os.path.join(os.path.dirname(os.path.dirname(__file__)),\n folder, filePath)\n <mask token>\n\n def getUrl(self, rowx):\n \"\"\"\n 获取请求地址\n :parameter rowx:在excel中的行数\n \"\"\"\n return self.readExcel(rowx)[1]\n\n def getData(self, rowx):\n \"\"\"\n 获取数据并且返回\n :parameter rowx:在excel中的行数\n \"\"\"\n return json.loads(self.readExcel(rowx)[2])\n",
"step-3": "<mask token>\n\n\nclass Helper(object):\n \"\"\"公共方法\"\"\"\n\n def base_dir(self, filePath, folder='data'):\n \"\"\"\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n \"\"\"\n return os.path.join(os.path.dirname(os.path.dirname(__file__)),\n folder, filePath)\n\n def readExcel(self, rowx, filePath='data.xlsx'):\n \"\"\"\n 读取excel中数据并且返回\n :parameter filePath:xlsx文件名称\n :parameter rowx:在excel中的行数\n \"\"\"\n book = xlrd.open_workbook(self.base_dir(filePath))\n sheet = book.sheet_by_index(0)\n return sheet.row_values(rowx)\n\n def getUrl(self, rowx):\n \"\"\"\n 获取请求地址\n :parameter rowx:在excel中的行数\n \"\"\"\n return self.readExcel(rowx)[1]\n\n def getData(self, rowx):\n \"\"\"\n 获取数据并且返回\n :parameter rowx:在excel中的行数\n \"\"\"\n return json.loads(self.readExcel(rowx)[2])\n",
"step-4": "import os\nimport xlrd\nimport json\n\n\nclass Helper(object):\n \"\"\"公共方法\"\"\"\n\n def base_dir(self, filePath, folder='data'):\n \"\"\"\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n \"\"\"\n return os.path.join(os.path.dirname(os.path.dirname(__file__)),\n folder, filePath)\n\n def readExcel(self, rowx, filePath='data.xlsx'):\n \"\"\"\n 读取excel中数据并且返回\n :parameter filePath:xlsx文件名称\n :parameter rowx:在excel中的行数\n \"\"\"\n book = xlrd.open_workbook(self.base_dir(filePath))\n sheet = book.sheet_by_index(0)\n return sheet.row_values(rowx)\n\n def getUrl(self, rowx):\n \"\"\"\n 获取请求地址\n :parameter rowx:在excel中的行数\n \"\"\"\n return self.readExcel(rowx)[1]\n\n def getData(self, rowx):\n \"\"\"\n 获取数据并且返回\n :parameter rowx:在excel中的行数\n \"\"\"\n return json.loads(self.readExcel(rowx)[2])\n",
"step-5": "#!/usr/bin/env python\n#-*-coding:utf-8-*-\n\n#author:wuya\n\n\nimport os\nimport xlrd\nimport json\n\n\nclass Helper(object):\n '''公共方法'''\n\n def base_dir(self,filePath,folder='data'):\n '''\n 返回公共路径\n :parameter folder:文件夹\n :parameter filePath:文件名称\n '''\n return os.path.join(\n os.path.dirname(\n os.path.dirname(__file__)),\n folder,filePath)\n\n def readExcel(self,rowx,filePath='data.xlsx'):\n '''\n 读取excel中数据并且返回\n :parameter filePath:xlsx文件名称\n :parameter rowx:在excel中的行数\n '''\n book=xlrd.open_workbook(self.base_dir(filePath))\n sheet=book.sheet_by_index(0)\n return sheet.row_values(rowx)\n\n def getUrl(self,rowx):\n '''\n 获取请求地址\n :parameter rowx:在excel中的行数\n '''\n return self.readExcel(rowx)[1]\n\n def getData(self,rowx):\n '''\n 获取数据并且返回\n :parameter rowx:在excel中的行数\n '''\n return json.loads(self.readExcel(rowx)[2])\n\n\n\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
import re
import pandas as pd
import pandas.io.formats.excel
from configparser import ConfigParser
from datetime import datetime
from termcolor import cprint
import os
import shutil
from openpyxl import load_workbook
import numpy as np
class pairtron():
def affiliation_cleaner(self, affiliation):
        affiliation = str(affiliation)
        affiliation = affiliation.strip(" ;")
        # Collapse repeated spaces into a single space.
        while "  " in affiliation:
            affiliation = affiliation.replace("  ", " ")
while ' ;' in affiliation:
affiliation = affiliation.replace(" ;", ";")
while ';;' in affiliation:
affiliation = affiliation.replace(";;", ";")
return affiliation
def zeta0_creation(self, indexed_files_dir, merge_columns):
""" Returns pandas dataframe which has latest record for each manual id after merging all "sheet_name"
in the previously indexed_files which are present in "indexed_files_dir"
"""
indexed_files = [file for file in os.listdir(indexed_files_dir) if not file.startswith("~")]
indexed_files_dict = {}
indexed_files_dict.clear()
dateList = []
del dateList[:]
for file in indexed_files:
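            # File names are assumed to end in _MMDDYYYY.<ext>; re-ordering
            # the digits to YYYYMMDD gives a chronologically sortable key.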
dated = file.split('_')[-1].split('.')[0]
dated = dated[4:] + dated[:4]
dateList.append(dated)
indexed_files_dict[dated] = file
dataframes = {}
for dated, file in indexed_files_dict.items():
file_name = indexed_files_dir + '\\' + file
dataframes[dated] = pd.read_excel(file_name, sheet_name=0)
dataframes[dated]['file_date'] = dated
dataframes[dated]['mid'] = [int(elem.split('_')[-1]) for elem in dataframes[dated]['manual_id']]
merged_df = pd.concat([dataframes[dated] for dated in dateList], ignore_index=True)
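        # Newest file first, so drop_duplicates keeps the latest record per manual_id.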
merged_df = merged_df.sort_values('file_date', ascending=False)
zeta0 = merged_df.drop_duplicates(subset='manual_id', keep='first')
pd.set_option('mode.chained_assignment', None)
for col in zeta0.columns:
zeta0[col] = zeta0[col].astype('str')
zeta0 = zeta0.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
zeta0 = zeta0.sort_values('mid', ascending=True)
if "manual_id" not in merge_columns:
merge_columns.append("manual_id")
zeta0 = zeta0[merge_columns]
# print(zeta0)
return zeta0
def copy_larvol_xlsx(self, template, acronym):
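        # The template file name is expected to contain the placeholders
        # ACRONYM and MMDDYYYY, which are substituted before copying.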
date = datetime.now().date().strftime('%m%d%Y')
self.dest_file = os.path.basename(template).replace('ACRONYM',acronym).replace('MMDDYYYY', date + '_Pairtron')
shutil.copy2(template, self.dest_file)
def selectionAfterJoin(self, df, cols, common_cols):
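        # Coalesce the _left (current) and _right (zeta0) copies of each shared
        # column: take the right value when the left is null, or when the right
        # is non-empty and the two disagree; otherwise keep the left value.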
for col in common_cols:
if col != 'manual_id':
df[col] = np.where(df['{}_left'.format(col)].isnull() | ((df['{}_right'.format(col)].notnull()) & (df['{}_right'.format(col)] != '') & (df['{}_left'.format(col)] != df['{}_right'.format(col)])), df['{}_right'.format(col)], df['{}_left'.format(col)])
drop_list = ['{}_left'.format(col) for col in common_cols if col != 'manual_id']
drop_list.extend(['{}_right'.format(col) for col in common_cols if col != 'manual_id'])
df.drop(drop_list, axis=1, inplace=True)
return df[cols]
def update_larvol_xlsx(self, src, acronym, sheets, columns, zeta0_df=None):
wb = load_workbook(filename=self.dest_file)
ws = wb[sheets[0]]
ws.title = sheets[0].replace('ACRONYM',acronym)
        try:
            curr_df = pd.read_excel(src)
        except Exception:
            # The source may be a CSV file rather than an Excel workbook.
            curr_df = pd.read_csv(src)
if zeta0_df is not None:
curr_jn_zeta = pd.merge(curr_df, zeta0_df, left_on='manual_id', right_on='manual_id', how='left', suffixes=('_left', '_right'))
common_columns = [col for col in curr_df.columns if col in zeta0_df.columns]
self.source_df = self.selectionAfterJoin(curr_jn_zeta, columns, common_columns)
else:
self.source_df = curr_df
session_list = self.source_df.fillna('').values.tolist()
        # Write each cleaned cell value into the worksheet (data rows start
        # at 2, leaving row 1 for the template's header).
        for row_iter in range(len(session_list)):
            for col_iter in range(len(session_list[row_iter])):
                ws.cell(row=row_iter+2, column=col_iter+1).value = self.affiliation_cleaner(session_list[row_iter][col_iter])
wb.save(self.dest_file)
def process_merger(self, source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns):
self.copy_larvol_xlsx(template, acronym)
if merge.upper() == 'YES':
zeta0 = self.zeta0_creation(indexed_files_dir, merge_columns)
self.update_larvol_xlsx(source_file, acronym, sheets, columns, zeta0)
else:
self.update_larvol_xlsx(source_file, acronym, sheets, columns)
def separated_by_number(self, source_id, manual_id, authors_list, affiliations_list):
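        # Pairs authors with affiliations when both use numeric superscripts,
        # e.g. authors like "J. Doe1,2" and affiliations like "1 Dept of X".
        # Un-numbered affiliation lines are appended to the previous index.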
separated_authors_list = []
affiliations_dict = {}
prev_affiliation = None
for affiliation in affiliations_list:
            if affiliation != '':
                # A leading index number marks the start of a new affiliation entry.
                group = re.findall(r'\d+', affiliation)
if group != []:
num = list(map(int, group))[0]
affiliations_dict[str(num)] = str(num).join(affiliation.split(str(num))[1:]).strip(',. ')
prev_affiliation = num
elif prev_affiliation is not None:
num = prev_affiliation
affiliations_dict[str(num)] = affiliations_dict[str(num)] + '; ' + affiliation.strip(',. ')
prev_affiliation = num
        for author in authors_list:
            # The author's name precedes the first affiliation index digit.
            group = re.findall(r'\d+', author)
            num_list = list(map(int, group))
            if num_list != []:
                author_name = author.split(str(num_list[0]))[0].strip(',.-; ')
            else:
                author_name = author.strip(',.-; ')
for num in num_list:
                if str(num) not in affiliations_dict:
                    affiliations_dict[str(num)] = ''
                    cprint("Exception for manual_id: {} as affiliation index {} wasn't found".format(manual_id, str(num)), 'yellow', attrs=['bold'])
affiliation_name = '; '.join([affiliations_dict[str(num)].strip(',.- ') for num in num_list])
separated_authors_list.append([source_id, manual_id, author_name, affiliation_name])
return separated_authors_list
def separated_by_semicolon(self, source_id, manual_id, authors_list, affiliations_list):
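        # Pairs authors and affiliations positionally when the two
        # semicolon-separated lists have matching lengths.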
separated_authors_list = []
for iter in range(len(authors_list)):
author_name = authors_list[iter].strip(',.-; ')
try:
affiliation_name = affiliations_list[iter].strip(',.- ')
            except IndexError:
affiliation_name = ''
separated_authors_list.append([source_id, manual_id, author_name, affiliation_name])
return separated_authors_list
def common_affiliation(self, source_id, manual_id, authors_list, affiliations_list):
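        # Every author shares the single affiliation given for the record.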
separated_authors_list = []
for iter in range(len(authors_list)):
author_name = authors_list[iter].strip(',.-; ')
affiliation_name = affiliations_list[0].strip(',.- ')
separated_authors_list.append([source_id, manual_id, author_name, affiliation_name])
return separated_authors_list
def process_pairtron(self, sheet):
source_df = self.source_df
source_df = source_df[source_df['authors'].notnull()]
source_id_list = source_df['source_id'].fillna('').tolist()
manual_id_list = source_df['manual_id'].fillna('').tolist()
authors_list = source_df['authors'].tolist()
affiliation_list = source_df['author_affiliation'].fillna('').tolist()
pairtron_list = []
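        # Choose a pairing strategy per record: numeric-superscript matching,
        # positional matching, or one shared affiliation for all authors.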
for iter in range(len(authors_list)):
author_tokens = [elem.strip() for elem in authors_list[iter].split(';')]
affiliation_tokens = [elem.strip() for elem in affiliation_list[iter].split(';')]
try:
if author_tokens[0][-1].isdigit() and '1' in affiliation_list[iter]:
pairtron_list.extend(self.separated_by_number(source_id_list[iter], manual_id_list[iter], author_tokens, affiliation_tokens))
elif len(author_tokens) == len(affiliation_tokens):
pairtron_list.extend(self.separated_by_semicolon(source_id_list[iter], manual_id_list[iter], author_tokens, affiliation_tokens))
elif author_tokens[0][-1].isdigit() and '1' not in affiliation_list[iter]:
cprint("ALERT: manual_id: {} has missing affiliations.".format(manual_id_list[iter]), 'red', attrs=['bold'])
else:
pairtron_list.extend(self.common_affiliation(source_id_list[iter], manual_id_list[iter], author_tokens, affiliation_tokens))
            except Exception:
                # Skip records whose author/affiliation tokens cannot be paired.
                pass
df = pd.DataFrame(pairtron_list, columns=['source_id', 'manual_id', 'authors', 'author_affiliation'])
df.drop_duplicates(inplace = True)
authorsInfo_list = df.values.tolist()
wb = load_workbook(filename=self.dest_file)
ws = wb[sheet]
for row_iter in range(len(authorsInfo_list)):
for col_iter in range(len(authorsInfo_list[row_iter])):
ws.cell(row=row_iter+2, column=col_iter+1).value = authorsInfo_list[row_iter][col_iter]
wb.save(self.dest_file)
def processData(self, source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns):
self.process_merger(source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns)
self.process_pairtron(sheets[1])
if __name__ == "__main__":
start = datetime.now()
print ("Script Start Time ",start)
print ("Script Running.....\n")
parser = ConfigParser()
parser.read('pairtron_config.ini')
source_file = parser.get('dynamic_fields', 'source_file')
acronym = parser.get('dynamic_fields', 'ACRONYM')
merge = parser.get('dynamic_fields', 'merge')
merge_columns = [elem.strip() for elem in parser.get('dynamic_fields', 'merge_columns').split(',')]
template = parser.get('static_fields', 'template')
sheets = parser.get('static_fields', 'sheets').split(',')
indexed_files_dir = parser.get('static_fields', 'indexed_files_dir')
columns = parser.get('static_fields', 'columns').split(',')
obj = pairtron()
obj.processData(source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns)
total_time = datetime.now() - start
print ("\nScript End Time ",datetime.now())
print ("Execution Time", total_time)
|
normal
|
{
"blob_id": "fbab5826f47163cf82b534d311eae572c5fcd128",
"index": 3287,
"step-1": "<mask token>\n\n\nclass pairtron:\n\n def affiliation_cleaner(self, affiliation):\n affiliation = str(affiliation)\n affiliation = affiliation.strip(' ;').replace(' ', ' ').replace(' ',\n ' ')\n while ' ;' in affiliation:\n affiliation = affiliation.replace(' ;', ';')\n while ';;' in affiliation:\n affiliation = affiliation.replace(';;', ';')\n return affiliation\n\n def zeta0_creation(self, indexed_files_dir, merge_columns):\n \"\"\" Returns pandas dataframe which has latest record for each manual id after merging all \"sheet_name\"\n in the previously indexed_files which are present in \"indexed_files_dir\"\n \"\"\"\n indexed_files = [file for file in os.listdir(indexed_files_dir) if \n not file.startswith('~')]\n indexed_files_dict = {}\n indexed_files_dict.clear()\n dateList = []\n del dateList[:]\n for file in indexed_files:\n dated = file.split('_')[-1].split('.')[0]\n dated = dated[4:] + dated[:4]\n dateList.append(dated)\n indexed_files_dict[dated] = file\n dataframes = {}\n for dated, file in indexed_files_dict.items():\n file_name = indexed_files_dir + '\\\\' + file\n dataframes[dated] = pd.read_excel(file_name, sheet_name=0)\n dataframes[dated]['file_date'] = dated\n dataframes[dated]['mid'] = [int(elem.split('_')[-1]) for elem in\n dataframes[dated]['manual_id']]\n merged_df = pd.concat([dataframes[dated] for dated in dateList],\n ignore_index=True)\n merged_df = merged_df.sort_values('file_date', ascending=False)\n zeta0 = merged_df.drop_duplicates(subset='manual_id', keep='first')\n pd.set_option('mode.chained_assignment', None)\n for col in zeta0.columns:\n zeta0[col] = zeta0[col].astype('str')\n zeta0 = zeta0.apply(lambda x: x.str.strip() if x.dtype == 'object' else\n x)\n zeta0 = zeta0.sort_values('mid', ascending=True)\n if 'manual_id' not in merge_columns:\n merge_columns.append('manual_id')\n zeta0 = zeta0[merge_columns]\n return zeta0\n <mask token>\n\n def selectionAfterJoin(self, df, cols, common_cols):\n for col in common_cols:\n if col != 'manual_id':\n df[col] = np.where(df['{}_left'.format(col)].isnull() | df[\n '{}_right'.format(col)].notnull() & (df['{}_right'.\n format(col)] != '') & (df['{}_left'.format(col)] != df[\n '{}_right'.format(col)]), df['{}_right'.format(col)],\n df['{}_left'.format(col)])\n drop_list = ['{}_left'.format(col) for col in common_cols if col !=\n 'manual_id']\n drop_list.extend(['{}_right'.format(col) for col in common_cols if \n col != 'manual_id'])\n df.drop(drop_list, axis=1, inplace=True)\n return df[cols]\n\n def update_larvol_xlsx(self, src, acronym, sheets, columns, zeta0_df=None):\n wb = load_workbook(filename=self.dest_file)\n ws = wb[sheets[0]]\n ws.title = sheets[0].replace('ACRONYM', acronym)\n try:\n curr_df = pd.read_excel(src)\n except:\n curr_df = pd.read_csv(src)\n if zeta0_df is not None:\n curr_jn_zeta = pd.merge(curr_df, zeta0_df, left_on='manual_id',\n right_on='manual_id', how='left', suffixes=('_left', '_right'))\n common_columns = [col for col in curr_df.columns if col in\n zeta0_df.columns]\n self.source_df = self.selectionAfterJoin(curr_jn_zeta, columns,\n common_columns)\n else:\n self.source_df = curr_df\n session_list = self.source_df.fillna('').values.tolist()\n for row_iter in range(len(session_list)):\n print(row_iter)\n for col_iter in range(len(session_list[row_iter])):\n print(col_iter)\n ws.cell(row=row_iter + 2, column=col_iter + 1\n ).value = self.affiliation_cleaner(session_list[\n row_iter][col_iter])\n wb.save(self.dest_file)\n\n def process_merger(self, source_file, acronym, merge, 
template, sheets,\n indexed_files_dir, columns, merge_columns):\n self.copy_larvol_xlsx(template, acronym)\n if merge.upper() == 'YES':\n zeta0 = self.zeta0_creation(indexed_files_dir, merge_columns)\n self.update_larvol_xlsx(source_file, acronym, sheets, columns,\n zeta0)\n else:\n self.update_larvol_xlsx(source_file, acronym, sheets, columns)\n\n def separated_by_number(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n affiliations_dict = {}\n prev_affiliation = None\n for affiliation in affiliations_list:\n if affiliation != '':\n group = re.findall('\\\\d+', affiliation)\n if group != []:\n num = list(map(int, group))[0]\n affiliations_dict[str(num)] = str(num).join(affiliation\n .split(str(num))[1:]).strip(',. ')\n prev_affiliation = num\n elif prev_affiliation is not None:\n num = prev_affiliation\n affiliations_dict[str(num)] = affiliations_dict[str(num)\n ] + '; ' + affiliation.strip(',. ')\n prev_affiliation = num\n for author in authors_list:\n group = re.findall('\\\\d+', author)\n num_list = list(map(int, group))\n if num_list != []:\n author_name = author.split(str(num_list[0]))[0].strip(',.-; ')\n else:\n author_name = author.strip(',.-; ')\n for num in num_list:\n try:\n elem = affiliations_dict[str(num)]\n except:\n affiliations_dict[str(num)] = ''\n cprint(\n \"Exception for manual_id: {} as affiliation index {} wasn't found\"\n .format(manual_id, str(num)), 'yellow', attrs=['bold'])\n affiliation_name = '; '.join([affiliations_dict[str(num)].strip\n (',.- ') for num in num_list])\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n\n def separated_by_semicolon(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n for iter in range(len(authors_list)):\n author_name = authors_list[iter].strip(',.-; ')\n try:\n affiliation_name = affiliations_list[iter].strip(',.- ')\n except:\n affiliation_name = ''\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n <mask token>\n <mask token>\n\n def processData(self, source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns):\n self.process_merger(source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns)\n self.process_pairtron(sheets[1])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass pairtron:\n\n def affiliation_cleaner(self, affiliation):\n affiliation = str(affiliation)\n affiliation = affiliation.strip(' ;').replace(' ', ' ').replace(' ',\n ' ')\n while ' ;' in affiliation:\n affiliation = affiliation.replace(' ;', ';')\n while ';;' in affiliation:\n affiliation = affiliation.replace(';;', ';')\n return affiliation\n\n def zeta0_creation(self, indexed_files_dir, merge_columns):\n \"\"\" Returns pandas dataframe which has latest record for each manual id after merging all \"sheet_name\"\n in the previously indexed_files which are present in \"indexed_files_dir\"\n \"\"\"\n indexed_files = [file for file in os.listdir(indexed_files_dir) if \n not file.startswith('~')]\n indexed_files_dict = {}\n indexed_files_dict.clear()\n dateList = []\n del dateList[:]\n for file in indexed_files:\n dated = file.split('_')[-1].split('.')[0]\n dated = dated[4:] + dated[:4]\n dateList.append(dated)\n indexed_files_dict[dated] = file\n dataframes = {}\n for dated, file in indexed_files_dict.items():\n file_name = indexed_files_dir + '\\\\' + file\n dataframes[dated] = pd.read_excel(file_name, sheet_name=0)\n dataframes[dated]['file_date'] = dated\n dataframes[dated]['mid'] = [int(elem.split('_')[-1]) for elem in\n dataframes[dated]['manual_id']]\n merged_df = pd.concat([dataframes[dated] for dated in dateList],\n ignore_index=True)\n merged_df = merged_df.sort_values('file_date', ascending=False)\n zeta0 = merged_df.drop_duplicates(subset='manual_id', keep='first')\n pd.set_option('mode.chained_assignment', None)\n for col in zeta0.columns:\n zeta0[col] = zeta0[col].astype('str')\n zeta0 = zeta0.apply(lambda x: x.str.strip() if x.dtype == 'object' else\n x)\n zeta0 = zeta0.sort_values('mid', ascending=True)\n if 'manual_id' not in merge_columns:\n merge_columns.append('manual_id')\n zeta0 = zeta0[merge_columns]\n return zeta0\n <mask token>\n\n def selectionAfterJoin(self, df, cols, common_cols):\n for col in common_cols:\n if col != 'manual_id':\n df[col] = np.where(df['{}_left'.format(col)].isnull() | df[\n '{}_right'.format(col)].notnull() & (df['{}_right'.\n format(col)] != '') & (df['{}_left'.format(col)] != df[\n '{}_right'.format(col)]), df['{}_right'.format(col)],\n df['{}_left'.format(col)])\n drop_list = ['{}_left'.format(col) for col in common_cols if col !=\n 'manual_id']\n drop_list.extend(['{}_right'.format(col) for col in common_cols if \n col != 'manual_id'])\n df.drop(drop_list, axis=1, inplace=True)\n return df[cols]\n\n def update_larvol_xlsx(self, src, acronym, sheets, columns, zeta0_df=None):\n wb = load_workbook(filename=self.dest_file)\n ws = wb[sheets[0]]\n ws.title = sheets[0].replace('ACRONYM', acronym)\n try:\n curr_df = pd.read_excel(src)\n except:\n curr_df = pd.read_csv(src)\n if zeta0_df is not None:\n curr_jn_zeta = pd.merge(curr_df, zeta0_df, left_on='manual_id',\n right_on='manual_id', how='left', suffixes=('_left', '_right'))\n common_columns = [col for col in curr_df.columns if col in\n zeta0_df.columns]\n self.source_df = self.selectionAfterJoin(curr_jn_zeta, columns,\n common_columns)\n else:\n self.source_df = curr_df\n session_list = self.source_df.fillna('').values.tolist()\n for row_iter in range(len(session_list)):\n print(row_iter)\n for col_iter in range(len(session_list[row_iter])):\n print(col_iter)\n ws.cell(row=row_iter + 2, column=col_iter + 1\n ).value = self.affiliation_cleaner(session_list[\n row_iter][col_iter])\n wb.save(self.dest_file)\n\n def process_merger(self, source_file, acronym, merge, 
template, sheets,\n indexed_files_dir, columns, merge_columns):\n self.copy_larvol_xlsx(template, acronym)\n if merge.upper() == 'YES':\n zeta0 = self.zeta0_creation(indexed_files_dir, merge_columns)\n self.update_larvol_xlsx(source_file, acronym, sheets, columns,\n zeta0)\n else:\n self.update_larvol_xlsx(source_file, acronym, sheets, columns)\n\n def separated_by_number(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n affiliations_dict = {}\n prev_affiliation = None\n for affiliation in affiliations_list:\n if affiliation != '':\n group = re.findall('\\\\d+', affiliation)\n if group != []:\n num = list(map(int, group))[0]\n affiliations_dict[str(num)] = str(num).join(affiliation\n .split(str(num))[1:]).strip(',. ')\n prev_affiliation = num\n elif prev_affiliation is not None:\n num = prev_affiliation\n affiliations_dict[str(num)] = affiliations_dict[str(num)\n ] + '; ' + affiliation.strip(',. ')\n prev_affiliation = num\n for author in authors_list:\n group = re.findall('\\\\d+', author)\n num_list = list(map(int, group))\n if num_list != []:\n author_name = author.split(str(num_list[0]))[0].strip(',.-; ')\n else:\n author_name = author.strip(',.-; ')\n for num in num_list:\n try:\n elem = affiliations_dict[str(num)]\n except:\n affiliations_dict[str(num)] = ''\n cprint(\n \"Exception for manual_id: {} as affiliation index {} wasn't found\"\n .format(manual_id, str(num)), 'yellow', attrs=['bold'])\n affiliation_name = '; '.join([affiliations_dict[str(num)].strip\n (',.- ') for num in num_list])\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n\n def separated_by_semicolon(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n for iter in range(len(authors_list)):\n author_name = authors_list[iter].strip(',.-; ')\n try:\n affiliation_name = affiliations_list[iter].strip(',.- ')\n except:\n affiliation_name = ''\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n <mask token>\n\n def process_pairtron(self, sheet):\n source_df = self.source_df\n source_df = source_df[source_df['authors'].notnull()]\n source_id_list = source_df['source_id'].fillna('').tolist()\n manual_id_list = source_df['manual_id'].fillna('').tolist()\n authors_list = source_df['authors'].tolist()\n affiliation_list = source_df['author_affiliation'].fillna('').tolist()\n pairtron_list = []\n for iter in range(len(authors_list)):\n author_tokens = [elem.strip() for elem in authors_list[iter].\n split(';')]\n affiliation_tokens = [elem.strip() for elem in affiliation_list\n [iter].split(';')]\n try:\n if author_tokens[0][-1].isdigit() and '1' in affiliation_list[\n iter]:\n pairtron_list.extend(self.separated_by_number(\n source_id_list[iter], manual_id_list[iter],\n author_tokens, affiliation_tokens))\n elif len(author_tokens) == len(affiliation_tokens):\n pairtron_list.extend(self.separated_by_semicolon(\n source_id_list[iter], manual_id_list[iter],\n author_tokens, affiliation_tokens))\n elif author_tokens[0][-1].isdigit(\n ) and '1' not in affiliation_list[iter]:\n cprint('ALERT: manual_id: {} has missing affiliations.'\n .format(manual_id_list[iter]), 'red', attrs=['bold'])\n else:\n pairtron_list.extend(self.common_affiliation(\n source_id_list[iter], manual_id_list[iter],\n author_tokens, affiliation_tokens))\n except:\n pass\n df = pd.DataFrame(pairtron_list, columns=['source_id', 
'manual_id',\n 'authors', 'author_affiliation'])\n df.drop_duplicates(inplace=True)\n authorsInfo_list = df.values.tolist()\n wb = load_workbook(filename=self.dest_file)\n ws = wb[sheet]\n for row_iter in range(len(authorsInfo_list)):\n for col_iter in range(len(authorsInfo_list[row_iter])):\n ws.cell(row=row_iter + 2, column=col_iter + 1\n ).value = authorsInfo_list[row_iter][col_iter]\n wb.save(self.dest_file)\n\n def processData(self, source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns):\n self.process_merger(source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns)\n self.process_pairtron(sheets[1])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass pairtron:\n\n def affiliation_cleaner(self, affiliation):\n affiliation = str(affiliation)\n affiliation = affiliation.strip(' ;').replace(' ', ' ').replace(' ',\n ' ')\n while ' ;' in affiliation:\n affiliation = affiliation.replace(' ;', ';')\n while ';;' in affiliation:\n affiliation = affiliation.replace(';;', ';')\n return affiliation\n\n def zeta0_creation(self, indexed_files_dir, merge_columns):\n \"\"\" Returns pandas dataframe which has latest record for each manual id after merging all \"sheet_name\"\n in the previously indexed_files which are present in \"indexed_files_dir\"\n \"\"\"\n indexed_files = [file for file in os.listdir(indexed_files_dir) if \n not file.startswith('~')]\n indexed_files_dict = {}\n indexed_files_dict.clear()\n dateList = []\n del dateList[:]\n for file in indexed_files:\n dated = file.split('_')[-1].split('.')[0]\n dated = dated[4:] + dated[:4]\n dateList.append(dated)\n indexed_files_dict[dated] = file\n dataframes = {}\n for dated, file in indexed_files_dict.items():\n file_name = indexed_files_dir + '\\\\' + file\n dataframes[dated] = pd.read_excel(file_name, sheet_name=0)\n dataframes[dated]['file_date'] = dated\n dataframes[dated]['mid'] = [int(elem.split('_')[-1]) for elem in\n dataframes[dated]['manual_id']]\n merged_df = pd.concat([dataframes[dated] for dated in dateList],\n ignore_index=True)\n merged_df = merged_df.sort_values('file_date', ascending=False)\n zeta0 = merged_df.drop_duplicates(subset='manual_id', keep='first')\n pd.set_option('mode.chained_assignment', None)\n for col in zeta0.columns:\n zeta0[col] = zeta0[col].astype('str')\n zeta0 = zeta0.apply(lambda x: x.str.strip() if x.dtype == 'object' else\n x)\n zeta0 = zeta0.sort_values('mid', ascending=True)\n if 'manual_id' not in merge_columns:\n merge_columns.append('manual_id')\n zeta0 = zeta0[merge_columns]\n return zeta0\n\n def copy_larvol_xlsx(self, template, acronym):\n date = datetime.now().date().strftime('%m%d%Y')\n self.dest_file = os.path.basename(template).replace('ACRONYM', acronym\n ).replace('MMDDYYYY', date + '_Pairtron')\n shutil.copy2(template, self.dest_file)\n\n def selectionAfterJoin(self, df, cols, common_cols):\n for col in common_cols:\n if col != 'manual_id':\n df[col] = np.where(df['{}_left'.format(col)].isnull() | df[\n '{}_right'.format(col)].notnull() & (df['{}_right'.\n format(col)] != '') & (df['{}_left'.format(col)] != df[\n '{}_right'.format(col)]), df['{}_right'.format(col)],\n df['{}_left'.format(col)])\n drop_list = ['{}_left'.format(col) for col in common_cols if col !=\n 'manual_id']\n drop_list.extend(['{}_right'.format(col) for col in common_cols if \n col != 'manual_id'])\n df.drop(drop_list, axis=1, inplace=True)\n return df[cols]\n\n def update_larvol_xlsx(self, src, acronym, sheets, columns, zeta0_df=None):\n wb = load_workbook(filename=self.dest_file)\n ws = wb[sheets[0]]\n ws.title = sheets[0].replace('ACRONYM', acronym)\n try:\n curr_df = pd.read_excel(src)\n except:\n curr_df = pd.read_csv(src)\n if zeta0_df is not None:\n curr_jn_zeta = pd.merge(curr_df, zeta0_df, left_on='manual_id',\n right_on='manual_id', how='left', suffixes=('_left', '_right'))\n common_columns = [col for col in curr_df.columns if col in\n zeta0_df.columns]\n self.source_df = self.selectionAfterJoin(curr_jn_zeta, columns,\n common_columns)\n else:\n self.source_df = curr_df\n session_list = self.source_df.fillna('').values.tolist()\n for row_iter in range(len(session_list)):\n print(row_iter)\n for col_iter in 
range(len(session_list[row_iter])):\n print(col_iter)\n ws.cell(row=row_iter + 2, column=col_iter + 1\n ).value = self.affiliation_cleaner(session_list[\n row_iter][col_iter])\n wb.save(self.dest_file)\n\n def process_merger(self, source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns):\n self.copy_larvol_xlsx(template, acronym)\n if merge.upper() == 'YES':\n zeta0 = self.zeta0_creation(indexed_files_dir, merge_columns)\n self.update_larvol_xlsx(source_file, acronym, sheets, columns,\n zeta0)\n else:\n self.update_larvol_xlsx(source_file, acronym, sheets, columns)\n\n def separated_by_number(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n affiliations_dict = {}\n prev_affiliation = None\n for affiliation in affiliations_list:\n if affiliation != '':\n group = re.findall('\\\\d+', affiliation)\n if group != []:\n num = list(map(int, group))[0]\n affiliations_dict[str(num)] = str(num).join(affiliation\n .split(str(num))[1:]).strip(',. ')\n prev_affiliation = num\n elif prev_affiliation is not None:\n num = prev_affiliation\n affiliations_dict[str(num)] = affiliations_dict[str(num)\n ] + '; ' + affiliation.strip(',. ')\n prev_affiliation = num\n for author in authors_list:\n group = re.findall('\\\\d+', author)\n num_list = list(map(int, group))\n if num_list != []:\n author_name = author.split(str(num_list[0]))[0].strip(',.-; ')\n else:\n author_name = author.strip(',.-; ')\n for num in num_list:\n try:\n elem = affiliations_dict[str(num)]\n except:\n affiliations_dict[str(num)] = ''\n cprint(\n \"Exception for manual_id: {} as affiliation index {} wasn't found\"\n .format(manual_id, str(num)), 'yellow', attrs=['bold'])\n affiliation_name = '; '.join([affiliations_dict[str(num)].strip\n (',.- ') for num in num_list])\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n\n def separated_by_semicolon(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n for iter in range(len(authors_list)):\n author_name = authors_list[iter].strip(',.-; ')\n try:\n affiliation_name = affiliations_list[iter].strip(',.- ')\n except:\n affiliation_name = ''\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n\n def common_affiliation(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n for iter in range(len(authors_list)):\n author_name = authors_list[iter].strip(',.-; ')\n affiliation_name = affiliations_list[0].strip(',.- ')\n print(affiliation_name)\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n\n def process_pairtron(self, sheet):\n source_df = self.source_df\n source_df = source_df[source_df['authors'].notnull()]\n source_id_list = source_df['source_id'].fillna('').tolist()\n manual_id_list = source_df['manual_id'].fillna('').tolist()\n authors_list = source_df['authors'].tolist()\n affiliation_list = source_df['author_affiliation'].fillna('').tolist()\n pairtron_list = []\n for iter in range(len(authors_list)):\n author_tokens = [elem.strip() for elem in authors_list[iter].\n split(';')]\n affiliation_tokens = [elem.strip() for elem in affiliation_list\n [iter].split(';')]\n try:\n if author_tokens[0][-1].isdigit() and '1' in affiliation_list[\n iter]:\n pairtron_list.extend(self.separated_by_number(\n source_id_list[iter], 
manual_id_list[iter],\n author_tokens, affiliation_tokens))\n elif len(author_tokens) == len(affiliation_tokens):\n pairtron_list.extend(self.separated_by_semicolon(\n source_id_list[iter], manual_id_list[iter],\n author_tokens, affiliation_tokens))\n elif author_tokens[0][-1].isdigit(\n ) and '1' not in affiliation_list[iter]:\n cprint('ALERT: manual_id: {} has missing affiliations.'\n .format(manual_id_list[iter]), 'red', attrs=['bold'])\n else:\n pairtron_list.extend(self.common_affiliation(\n source_id_list[iter], manual_id_list[iter],\n author_tokens, affiliation_tokens))\n except:\n pass\n df = pd.DataFrame(pairtron_list, columns=['source_id', 'manual_id',\n 'authors', 'author_affiliation'])\n df.drop_duplicates(inplace=True)\n authorsInfo_list = df.values.tolist()\n wb = load_workbook(filename=self.dest_file)\n ws = wb[sheet]\n for row_iter in range(len(authorsInfo_list)):\n for col_iter in range(len(authorsInfo_list[row_iter])):\n ws.cell(row=row_iter + 2, column=col_iter + 1\n ).value = authorsInfo_list[row_iter][col_iter]\n wb.save(self.dest_file)\n\n def processData(self, source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns):\n self.process_merger(source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns)\n self.process_pairtron(sheets[1])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass pairtron:\n\n def affiliation_cleaner(self, affiliation):\n affiliation = str(affiliation)\n affiliation = affiliation.strip(' ;').replace(' ', ' ').replace(' ',\n ' ')\n while ' ;' in affiliation:\n affiliation = affiliation.replace(' ;', ';')\n while ';;' in affiliation:\n affiliation = affiliation.replace(';;', ';')\n return affiliation\n\n def zeta0_creation(self, indexed_files_dir, merge_columns):\n \"\"\" Returns pandas dataframe which has latest record for each manual id after merging all \"sheet_name\"\n in the previously indexed_files which are present in \"indexed_files_dir\"\n \"\"\"\n indexed_files = [file for file in os.listdir(indexed_files_dir) if \n not file.startswith('~')]\n indexed_files_dict = {}\n indexed_files_dict.clear()\n dateList = []\n del dateList[:]\n for file in indexed_files:\n dated = file.split('_')[-1].split('.')[0]\n dated = dated[4:] + dated[:4]\n dateList.append(dated)\n indexed_files_dict[dated] = file\n dataframes = {}\n for dated, file in indexed_files_dict.items():\n file_name = indexed_files_dir + '\\\\' + file\n dataframes[dated] = pd.read_excel(file_name, sheet_name=0)\n dataframes[dated]['file_date'] = dated\n dataframes[dated]['mid'] = [int(elem.split('_')[-1]) for elem in\n dataframes[dated]['manual_id']]\n merged_df = pd.concat([dataframes[dated] for dated in dateList],\n ignore_index=True)\n merged_df = merged_df.sort_values('file_date', ascending=False)\n zeta0 = merged_df.drop_duplicates(subset='manual_id', keep='first')\n pd.set_option('mode.chained_assignment', None)\n for col in zeta0.columns:\n zeta0[col] = zeta0[col].astype('str')\n zeta0 = zeta0.apply(lambda x: x.str.strip() if x.dtype == 'object' else\n x)\n zeta0 = zeta0.sort_values('mid', ascending=True)\n if 'manual_id' not in merge_columns:\n merge_columns.append('manual_id')\n zeta0 = zeta0[merge_columns]\n return zeta0\n\n def copy_larvol_xlsx(self, template, acronym):\n date = datetime.now().date().strftime('%m%d%Y')\n self.dest_file = os.path.basename(template).replace('ACRONYM', acronym\n ).replace('MMDDYYYY', date + '_Pairtron')\n shutil.copy2(template, self.dest_file)\n\n def selectionAfterJoin(self, df, cols, common_cols):\n for col in common_cols:\n if col != 'manual_id':\n df[col] = np.where(df['{}_left'.format(col)].isnull() | df[\n '{}_right'.format(col)].notnull() & (df['{}_right'.\n format(col)] != '') & (df['{}_left'.format(col)] != df[\n '{}_right'.format(col)]), df['{}_right'.format(col)],\n df['{}_left'.format(col)])\n drop_list = ['{}_left'.format(col) for col in common_cols if col !=\n 'manual_id']\n drop_list.extend(['{}_right'.format(col) for col in common_cols if \n col != 'manual_id'])\n df.drop(drop_list, axis=1, inplace=True)\n return df[cols]\n\n def update_larvol_xlsx(self, src, acronym, sheets, columns, zeta0_df=None):\n wb = load_workbook(filename=self.dest_file)\n ws = wb[sheets[0]]\n ws.title = sheets[0].replace('ACRONYM', acronym)\n try:\n curr_df = pd.read_excel(src)\n except:\n curr_df = pd.read_csv(src)\n if zeta0_df is not None:\n curr_jn_zeta = pd.merge(curr_df, zeta0_df, left_on='manual_id',\n right_on='manual_id', how='left', suffixes=('_left', '_right'))\n common_columns = [col for col in curr_df.columns if col in\n zeta0_df.columns]\n self.source_df = self.selectionAfterJoin(curr_jn_zeta, columns,\n common_columns)\n else:\n self.source_df = curr_df\n session_list = self.source_df.fillna('').values.tolist()\n for row_iter in range(len(session_list)):\n print(row_iter)\n for col_iter in 
range(len(session_list[row_iter])):\n print(col_iter)\n ws.cell(row=row_iter + 2, column=col_iter + 1\n ).value = self.affiliation_cleaner(session_list[\n row_iter][col_iter])\n wb.save(self.dest_file)\n\n def process_merger(self, source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns):\n self.copy_larvol_xlsx(template, acronym)\n if merge.upper() == 'YES':\n zeta0 = self.zeta0_creation(indexed_files_dir, merge_columns)\n self.update_larvol_xlsx(source_file, acronym, sheets, columns,\n zeta0)\n else:\n self.update_larvol_xlsx(source_file, acronym, sheets, columns)\n\n def separated_by_number(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n affiliations_dict = {}\n prev_affiliation = None\n for affiliation in affiliations_list:\n if affiliation != '':\n group = re.findall('\\\\d+', affiliation)\n if group != []:\n num = list(map(int, group))[0]\n affiliations_dict[str(num)] = str(num).join(affiliation\n .split(str(num))[1:]).strip(',. ')\n prev_affiliation = num\n elif prev_affiliation is not None:\n num = prev_affiliation\n affiliations_dict[str(num)] = affiliations_dict[str(num)\n ] + '; ' + affiliation.strip(',. ')\n prev_affiliation = num\n for author in authors_list:\n group = re.findall('\\\\d+', author)\n num_list = list(map(int, group))\n if num_list != []:\n author_name = author.split(str(num_list[0]))[0].strip(',.-; ')\n else:\n author_name = author.strip(',.-; ')\n for num in num_list:\n try:\n elem = affiliations_dict[str(num)]\n except:\n affiliations_dict[str(num)] = ''\n cprint(\n \"Exception for manual_id: {} as affiliation index {} wasn't found\"\n .format(manual_id, str(num)), 'yellow', attrs=['bold'])\n affiliation_name = '; '.join([affiliations_dict[str(num)].strip\n (',.- ') for num in num_list])\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n\n def separated_by_semicolon(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n for iter in range(len(authors_list)):\n author_name = authors_list[iter].strip(',.-; ')\n try:\n affiliation_name = affiliations_list[iter].strip(',.- ')\n except:\n affiliation_name = ''\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n\n def common_affiliation(self, source_id, manual_id, authors_list,\n affiliations_list):\n separated_authors_list = []\n for iter in range(len(authors_list)):\n author_name = authors_list[iter].strip(',.-; ')\n affiliation_name = affiliations_list[0].strip(',.- ')\n print(affiliation_name)\n separated_authors_list.append([source_id, manual_id,\n author_name, affiliation_name])\n return separated_authors_list\n\n def process_pairtron(self, sheet):\n source_df = self.source_df\n source_df = source_df[source_df['authors'].notnull()]\n source_id_list = source_df['source_id'].fillna('').tolist()\n manual_id_list = source_df['manual_id'].fillna('').tolist()\n authors_list = source_df['authors'].tolist()\n affiliation_list = source_df['author_affiliation'].fillna('').tolist()\n pairtron_list = []\n for iter in range(len(authors_list)):\n author_tokens = [elem.strip() for elem in authors_list[iter].\n split(';')]\n affiliation_tokens = [elem.strip() for elem in affiliation_list\n [iter].split(';')]\n try:\n if author_tokens[0][-1].isdigit() and '1' in affiliation_list[\n iter]:\n pairtron_list.extend(self.separated_by_number(\n source_id_list[iter], 
manual_id_list[iter],\n author_tokens, affiliation_tokens))\n elif len(author_tokens) == len(affiliation_tokens):\n pairtron_list.extend(self.separated_by_semicolon(\n source_id_list[iter], manual_id_list[iter],\n author_tokens, affiliation_tokens))\n elif author_tokens[0][-1].isdigit(\n ) and '1' not in affiliation_list[iter]:\n cprint('ALERT: manual_id: {} has missing affiliations.'\n .format(manual_id_list[iter]), 'red', attrs=['bold'])\n else:\n pairtron_list.extend(self.common_affiliation(\n source_id_list[iter], manual_id_list[iter],\n author_tokens, affiliation_tokens))\n except:\n pass\n df = pd.DataFrame(pairtron_list, columns=['source_id', 'manual_id',\n 'authors', 'author_affiliation'])\n df.drop_duplicates(inplace=True)\n authorsInfo_list = df.values.tolist()\n wb = load_workbook(filename=self.dest_file)\n ws = wb[sheet]\n for row_iter in range(len(authorsInfo_list)):\n for col_iter in range(len(authorsInfo_list[row_iter])):\n ws.cell(row=row_iter + 2, column=col_iter + 1\n ).value = authorsInfo_list[row_iter][col_iter]\n wb.save(self.dest_file)\n\n def processData(self, source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns):\n self.process_merger(source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns)\n self.process_pairtron(sheets[1])\n\n\nif __name__ == '__main__':\n start = datetime.now()\n print('Script Start Time ', start)\n print('Script Running.....\\n')\n parser = ConfigParser()\n parser.read('pairtron_config.ini')\n source_file = parser.get('dynamic_fields', 'source_file')\n acronym = parser.get('dynamic_fields', 'ACRONYM')\n merge = parser.get('dynamic_fields', 'merge')\n merge_columns = [elem.strip() for elem in parser.get('dynamic_fields',\n 'merge_columns').split(',')]\n template = parser.get('static_fields', 'template')\n sheets = parser.get('static_fields', 'sheets').split(',')\n indexed_files_dir = parser.get('static_fields', 'indexed_files_dir')\n columns = parser.get('static_fields', 'columns').split(',')\n obj = pairtron()\n obj.processData(source_file, acronym, merge, template, sheets,\n indexed_files_dir, columns, merge_columns)\n total_time = datetime.now() - start\n print('\\nScript End Time ', datetime.now())\n print('Execution Time', total_time)\n",
"step-5": "import re\nimport pandas as pd\nimport pandas.io.formats.excel\nfrom configparser import ConfigParser\nfrom datetime import datetime\nfrom termcolor import cprint\nimport os\nimport shutil\nfrom openpyxl import load_workbook\nimport numpy as np\n\nclass pairtron():\n\n def affiliation_cleaner(self, affiliation):\n # print(affiliation)\n affiliation = str(affiliation)\n affiliation = affiliation.strip(\" ;\").replace(\" \", \" \").replace(\" \",\" \")\n while ' ;' in affiliation:\n affiliation = affiliation.replace(\" ;\", \";\")\n while ';;' in affiliation:\n affiliation = affiliation.replace(\";;\", \";\")\n return affiliation\n\n def zeta0_creation(self, indexed_files_dir, merge_columns):\n \"\"\" Returns pandas dataframe which has latest record for each manual id after merging all \"sheet_name\"\n in the previously indexed_files which are present in \"indexed_files_dir\"\n \"\"\"\n indexed_files = [file for file in os.listdir(indexed_files_dir) if not file.startswith(\"~\")]\n\n indexed_files_dict = {}\n indexed_files_dict.clear()\n\n dateList = []\n del dateList[:]\n for file in indexed_files:\n dated = file.split('_')[-1].split('.')[0]\n dated = dated[4:] + dated[:4]\n dateList.append(dated)\n indexed_files_dict[dated] = file\n\n dataframes = {}\n\n for dated, file in indexed_files_dict.items():\n file_name = indexed_files_dir + '\\\\' + file\n dataframes[dated] = pd.read_excel(file_name, sheet_name=0)\n dataframes[dated]['file_date'] = dated\n dataframes[dated]['mid'] = [int(elem.split('_')[-1]) for elem in dataframes[dated]['manual_id']]\n\n merged_df = pd.concat([dataframes[dated] for dated in dateList], ignore_index=True)\n merged_df = merged_df.sort_values('file_date', ascending=False)\n zeta0 = merged_df.drop_duplicates(subset='manual_id', keep='first')\n pd.set_option('mode.chained_assignment', None)\n for col in zeta0.columns:\n zeta0[col] = zeta0[col].astype('str')\n zeta0 = zeta0.apply(lambda x: x.str.strip() if x.dtype == \"object\" else x)\n zeta0 = zeta0.sort_values('mid', ascending=True)\n if \"manual_id\" not in merge_columns:\n merge_columns.append(\"manual_id\")\n zeta0 = zeta0[merge_columns]\n # print(zeta0)\n return zeta0\n\n def copy_larvol_xlsx(self, template, acronym):\n date = datetime.now().date().strftime('%m%d%Y')\n self.dest_file = os.path.basename(template).replace('ACRONYM',acronym).replace('MMDDYYYY', date + '_Pairtron')\n shutil.copy2(template, self.dest_file)\n\n def selectionAfterJoin(self, df, cols, common_cols):\n for col in common_cols:\n if col != 'manual_id':\n df[col] = np.where(df['{}_left'.format(col)].isnull() | ((df['{}_right'.format(col)].notnull()) & (df['{}_right'.format(col)] != '') & (df['{}_left'.format(col)] != df['{}_right'.format(col)])), df['{}_right'.format(col)], df['{}_left'.format(col)])\n drop_list = ['{}_left'.format(col) for col in common_cols if col != 'manual_id']\n drop_list.extend(['{}_right'.format(col) for col in common_cols if col != 'manual_id'])\n df.drop(drop_list, axis=1, inplace=True)\n return df[cols]\n\n def update_larvol_xlsx(self, src, acronym, sheets, columns, zeta0_df=None):\n wb = load_workbook(filename=self.dest_file)\n ws = wb[sheets[0]]\n ws.title = sheets[0].replace('ACRONYM',acronym)\n try:\n curr_df = pd.read_excel(src)\n except:\n curr_df = pd.read_csv(src)\n if zeta0_df is not None:\n curr_jn_zeta = pd.merge(curr_df, zeta0_df, left_on='manual_id', right_on='manual_id', how='left', suffixes=('_left', '_right'))\n common_columns = [col for col in curr_df.columns if col in 
zeta0_df.columns]\n self.source_df = self.selectionAfterJoin(curr_jn_zeta, columns, common_columns)\n else:\n self.source_df = curr_df\n session_list = self.source_df.fillna('').values.tolist()\n for row_iter in range(len(session_list)):\n print(row_iter)\n for col_iter in range(len(session_list[row_iter])):\n print(col_iter)\n ws.cell(row=row_iter+2, column=col_iter+1).value = self.affiliation_cleaner(session_list[row_iter][col_iter])\n wb.save(self.dest_file)\n\n def process_merger(self, source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns):\n self.copy_larvol_xlsx(template, acronym)\n if merge.upper() == 'YES':\n zeta0 = self.zeta0_creation(indexed_files_dir, merge_columns)\n self.update_larvol_xlsx(source_file, acronym, sheets, columns, zeta0)\n else:\n self.update_larvol_xlsx(source_file, acronym, sheets, columns)\n\n def separated_by_number(self, source_id, manual_id, authors_list, affiliations_list):\n separated_authors_list = []\n affiliations_dict = {}\n prev_affiliation = None\n\n for affiliation in affiliations_list:\n #print(manual_id)\n #print(affiliation)\n if affiliation != '':\n group = re.findall(r'\\d+', affiliation)\n #print(group)\n if group != []:\n num = list(map(int, group))[0]\n affiliations_dict[str(num)] = str(num).join(affiliation.split(str(num))[1:]).strip(',. ')\n prev_affiliation = num\n elif prev_affiliation is not None:\n num = prev_affiliation\n affiliations_dict[str(num)] = affiliations_dict[str(num)] + '; ' + affiliation.strip(',. ')\n prev_affiliation = num\n\n for author in authors_list:\n #print(author)\n group = re.findall(r'\\d+', author)\n num_list = list(map(int, group))\n #print(num_list)\n if num_list != []:\n author_name = author.split(str(num_list[0]))[0].strip(',.-; ')\n else:\n author_name = author.strip(',.-; ')\n #print(author_name)\n for num in num_list:\n try:\n elem = affiliations_dict[str(num)]\n except:\n affiliations_dict[str(num)] = ''\n cprint(\"Exception for manual_id: {} as affiliation index {} wasn't found\".format(manual_id, str(num)), 'yellow', attrs=['bold'])\n\n affiliation_name = '; '.join([affiliations_dict[str(num)].strip(',.- ') for num in num_list])\n #print(affiliation_name)\n separated_authors_list.append([source_id, manual_id, author_name, affiliation_name])\n\n return separated_authors_list\n\n def separated_by_semicolon(self, source_id, manual_id, authors_list, affiliations_list):\n separated_authors_list = []\n\n for iter in range(len(authors_list)):\n author_name = authors_list[iter].strip(',.-; ')\n try:\n affiliation_name = affiliations_list[iter].strip(',.- ')\n except:\n affiliation_name = ''\n separated_authors_list.append([source_id, manual_id, author_name, affiliation_name])\n\n return separated_authors_list\n\n\n def common_affiliation(self, source_id, manual_id, authors_list, affiliations_list):\n separated_authors_list = []\n\n for iter in range(len(authors_list)):\n author_name = authors_list[iter].strip(',.-; ')\n affiliation_name = affiliations_list[0].strip(',.- ')\n print(affiliation_name)\n separated_authors_list.append([source_id, manual_id, author_name, affiliation_name])\n\n return separated_authors_list\n\n def process_pairtron(self, sheet):\n source_df = self.source_df\n source_df = source_df[source_df['authors'].notnull()]\n source_id_list = source_df['source_id'].fillna('').tolist()\n manual_id_list = source_df['manual_id'].fillna('').tolist()\n authors_list = source_df['authors'].tolist()\n affiliation_list = 
source_df['author_affiliation'].fillna('').tolist()\n pairtron_list = []\n\n for iter in range(len(authors_list)):\n #print(iter, manual_id_list[iter])\n author_tokens = [elem.strip() for elem in authors_list[iter].split(';')]\n affiliation_tokens = [elem.strip() for elem in affiliation_list[iter].split(';')]\n try:\n if author_tokens[0][-1].isdigit() and '1' in affiliation_list[iter]:\n pairtron_list.extend(self.separated_by_number(source_id_list[iter], manual_id_list[iter], author_tokens, affiliation_tokens))\n elif len(author_tokens) == len(affiliation_tokens):\n pairtron_list.extend(self.separated_by_semicolon(source_id_list[iter], manual_id_list[iter], author_tokens, affiliation_tokens))\n elif author_tokens[0][-1].isdigit() and '1' not in affiliation_list[iter]:\n cprint(\"ALERT: manual_id: {} has missing affiliations.\".format(manual_id_list[iter]), 'red', attrs=['bold'])\n else:\n pairtron_list.extend(self.common_affiliation(source_id_list[iter], manual_id_list[iter], author_tokens, affiliation_tokens))\n except:\n pass\n df = pd.DataFrame(pairtron_list, columns=['source_id', 'manual_id', 'authors', 'author_affiliation'])\n df.drop_duplicates(inplace = True)\n authorsInfo_list = df.values.tolist()\n wb = load_workbook(filename=self.dest_file)\n ws = wb[sheet]\n for row_iter in range(len(authorsInfo_list)):\n for col_iter in range(len(authorsInfo_list[row_iter])):\n ws.cell(row=row_iter+2, column=col_iter+1).value = authorsInfo_list[row_iter][col_iter]\n wb.save(self.dest_file)\n\n def processData(self, source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns):\n self.process_merger(source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns)\n self.process_pairtron(sheets[1])\n\nif __name__ == \"__main__\":\n\n start = datetime.now()\n print (\"Script Start Time \",start)\n print (\"Script Running.....\\n\")\n\n parser = ConfigParser()\n parser.read('pairtron_config.ini')\n\n source_file = parser.get('dynamic_fields', 'source_file')\n acronym = parser.get('dynamic_fields', 'ACRONYM')\n merge = parser.get('dynamic_fields', 'merge')\n merge_columns = [elem.strip() for elem in parser.get('dynamic_fields', 'merge_columns').split(',')]\n template = parser.get('static_fields', 'template')\n sheets = parser.get('static_fields', 'sheets').split(',')\n indexed_files_dir = parser.get('static_fields', 'indexed_files_dir')\n columns = parser.get('static_fields', 'columns').split(',')\n\n obj = pairtron()\n obj.processData(source_file, acronym, merge, template, sheets, indexed_files_dir, columns, merge_columns)\n\n total_time = datetime.now() - start\n print (\"\\nScript End Time \",datetime.now())\n print (\"Execution Time\", total_time)",
"step-ids": [
9,
10,
12,
13,
15
]
}
|
[
9,
10,
12,
13,
15
] |
import requests
from requests import Response
from auditlogging.Trail import Trail
from utils.Utils import is_empty
from auditlogging.agents.AuditAgent import AuditAgent
class APIAuditAgent(AuditAgent):
"""
Captures the audit trail using a REST endpoint URL (POST)
Add this agent to Auditor in order to capture audit log to an endpoint.
Note
-----------
    1. If the user wants to POST a custom JSON request body, pass a valid
    JSON string to the constructor and call Auditor.audit_custom(your_custom_json)
    2. After each call to capture() or capture_custom(), the latest response
    is preserved until the next endpoint request.
    Call endpoint_response() after each invocation to retrieve it.
"""
def __init__(self):
self._url = 'http://localhost:3000/auditlogs/create'
self._resp = None
def change_endpoint(self, url: str):
"""
Changes the default POST endpoint URL.
Caller can specify any POST endpoint URL to create resource in
database/storage.
Parameters
----------
url : str
a new POST endpoint URL
"""
if not is_empty(url):
self._url = url
def capture(self, trail: Trail):
"""
        Capture a Trail to the endpoint; the trail is serialized to
        JSON for the POST request
Parameters
----------
trail : Trail
a trail object to be used for POST
"""
self._call_endpoint(trail)
def capture_custom(self, jsontrail: str):
"""
Capture custom JSON trail to endpoint
Parameters
----------
jsontrail : str
            custom JSON string to be posted as the request body
"""
self._mark_json_trail(jsontrail)
    def endpoint_response(self) -> Response:
"""
        Access the response of the most recent endpoint call
Returns
--------
Response
Http response
"""
return self._resp
def _set_response(self, resp: Response):
self._resp = resp
def _call_endpoint(self, trail: Trail):
_resp = requests.post(self._url, json=trail.build_trail())
        if _resp.status_code != 200:
print(_resp.json())
self._set_response(resp=_resp)
def _mark_json_trail(self, jsontrail: str):
_resp = requests.post(self._url, data=jsontrail)
self._set_response(resp=_resp)
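

if __name__ == "__main__":
    # A minimal usage sketch, assuming a compatible service is listening at
    # the default endpoint; the payload shape is an assumption, so the POST
    # is wrapped to tolerate an unreachable service. Only methods defined
    # above are used.
    agent = APIAuditAgent()
    agent.change_endpoint("http://localhost:3000/auditlogs/create")
    try:
        agent.capture_custom('{"actor": "alice", "action": "login"}')  # assumed payload shape
        resp = agent.endpoint_response()
        if resp is not None:
            print(resp.status_code, resp.text)
    except requests.exceptions.ConnectionError as err:
        print("audit endpoint unreachable:", err)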
|
normal
|
{
"blob_id": "45a57fac564f23253f9d9cd5d0fd820e559c15b9",
"index": 1212,
"step-1": "<mask token>\n\n\nclass APIAuditAgent(AuditAgent):\n <mask token>\n\n def __init__(self):\n self._url = 'http://localhost:3000/auditlogs/create'\n self._resp = None\n\n def change_endpoint(self, url: str):\n \"\"\"\n Changes the default POST endpoint URL.\n Caller can specify any POST endpoint URL to create resource in\n database/storage.\n\n Parameters\n ----------\n url : str\n a new POST endpoint URL\n\n \"\"\"\n if not is_empty(url):\n self._url = url\n\n def capture(self, trail: Trail):\n \"\"\"\n Capture Trail to endpoint. Internally it transforms JSON\n object while calling POST endpoint\n\n Parameters\n ----------\n trail : Trail\n a trail object to be used for POST\n\n \"\"\"\n self._call_endpoint(trail)\n\n def capture_custom(self, jsontrail: str):\n \"\"\"\n Capture custom JSON trail to endpoint\n\n Parameters\n ----------\n jsontrail : str\n custom JSON required for\n\n \"\"\"\n self._mark_json_trail(jsontrail)\n <mask token>\n\n def _set_response(self, resp: Response):\n self._resp = resp\n\n def _call_endpoint(self, trail: Trail):\n _resp = requests.post(self._url, json=trail.build_trail())\n if _resp.status_code is not 200:\n print(_resp.json())\n self._set_response(resp=_resp)\n\n def _mark_json_trail(self, jsontrail: str):\n _resp = requests.post(self._url, data=jsontrail)\n self._set_response(resp=_resp)\n",
"step-2": "<mask token>\n\n\nclass APIAuditAgent(AuditAgent):\n <mask token>\n\n def __init__(self):\n self._url = 'http://localhost:3000/auditlogs/create'\n self._resp = None\n\n def change_endpoint(self, url: str):\n \"\"\"\n Changes the default POST endpoint URL.\n Caller can specify any POST endpoint URL to create resource in\n database/storage.\n\n Parameters\n ----------\n url : str\n a new POST endpoint URL\n\n \"\"\"\n if not is_empty(url):\n self._url = url\n\n def capture(self, trail: Trail):\n \"\"\"\n Capture Trail to endpoint. Internally it transforms JSON\n object while calling POST endpoint\n\n Parameters\n ----------\n trail : Trail\n a trail object to be used for POST\n\n \"\"\"\n self._call_endpoint(trail)\n\n def capture_custom(self, jsontrail: str):\n \"\"\"\n Capture custom JSON trail to endpoint\n\n Parameters\n ----------\n jsontrail : str\n custom JSON required for\n\n \"\"\"\n self._mark_json_trail(jsontrail)\n\n def endpoint_response(self) ->Response:\n \"\"\"\n access the response of the endpoint URL\n\n Returns\n --------\n Response\n Http response\n\n \"\"\"\n return self._resp\n\n def _set_response(self, resp: Response):\n self._resp = resp\n\n def _call_endpoint(self, trail: Trail):\n _resp = requests.post(self._url, json=trail.build_trail())\n if _resp.status_code is not 200:\n print(_resp.json())\n self._set_response(resp=_resp)\n\n def _mark_json_trail(self, jsontrail: str):\n _resp = requests.post(self._url, data=jsontrail)\n self._set_response(resp=_resp)\n",
"step-3": "<mask token>\n\n\nclass APIAuditAgent(AuditAgent):\n \"\"\"\n Captures the audit trail using a REST endpoint URL (POST)\n Add this agent to Auditor in order to capture audit log to an endpoint.\n\n Note\n -----------\n 1. If user wants to POST custom JSON request body then,\n pass a valid JSON string to constructor and call Auditor.audit_custom(your_custom_json)\n 2. After each call to capture() or capture_custom() latest response is preserved\n until next endpoint request.\n To get the response, after each invocation please call endpoint_response() to get response\n\n \"\"\"\n\n def __init__(self):\n self._url = 'http://localhost:3000/auditlogs/create'\n self._resp = None\n\n def change_endpoint(self, url: str):\n \"\"\"\n Changes the default POST endpoint URL.\n Caller can specify any POST endpoint URL to create resource in\n database/storage.\n\n Parameters\n ----------\n url : str\n a new POST endpoint URL\n\n \"\"\"\n if not is_empty(url):\n self._url = url\n\n def capture(self, trail: Trail):\n \"\"\"\n Capture Trail to endpoint. Internally it transforms JSON\n object while calling POST endpoint\n\n Parameters\n ----------\n trail : Trail\n a trail object to be used for POST\n\n \"\"\"\n self._call_endpoint(trail)\n\n def capture_custom(self, jsontrail: str):\n \"\"\"\n Capture custom JSON trail to endpoint\n\n Parameters\n ----------\n jsontrail : str\n custom JSON required for\n\n \"\"\"\n self._mark_json_trail(jsontrail)\n\n def endpoint_response(self) ->Response:\n \"\"\"\n access the response of the endpoint URL\n\n Returns\n --------\n Response\n Http response\n\n \"\"\"\n return self._resp\n\n def _set_response(self, resp: Response):\n self._resp = resp\n\n def _call_endpoint(self, trail: Trail):\n _resp = requests.post(self._url, json=trail.build_trail())\n if _resp.status_code is not 200:\n print(_resp.json())\n self._set_response(resp=_resp)\n\n def _mark_json_trail(self, jsontrail: str):\n _resp = requests.post(self._url, data=jsontrail)\n self._set_response(resp=_resp)\n",
"step-4": "import requests\nfrom requests import Response\nfrom auditlogging.Trail import Trail\nfrom utils.Utils import is_empty\nfrom auditlogging.agents.AuditAgent import AuditAgent\n\n\nclass APIAuditAgent(AuditAgent):\n \"\"\"\n Captures the audit trail using a REST endpoint URL (POST)\n Add this agent to Auditor in order to capture audit log to an endpoint.\n\n Note\n -----------\n 1. If user wants to POST custom JSON request body then,\n pass a valid JSON string to constructor and call Auditor.audit_custom(your_custom_json)\n 2. After each call to capture() or capture_custom() latest response is preserved\n until next endpoint request.\n To get the response, after each invocation please call endpoint_response() to get response\n\n \"\"\"\n\n def __init__(self):\n self._url = 'http://localhost:3000/auditlogs/create'\n self._resp = None\n\n def change_endpoint(self, url: str):\n \"\"\"\n Changes the default POST endpoint URL.\n Caller can specify any POST endpoint URL to create resource in\n database/storage.\n\n Parameters\n ----------\n url : str\n a new POST endpoint URL\n\n \"\"\"\n if not is_empty(url):\n self._url = url\n\n def capture(self, trail: Trail):\n \"\"\"\n Capture Trail to endpoint. Internally it transforms JSON\n object while calling POST endpoint\n\n Parameters\n ----------\n trail : Trail\n a trail object to be used for POST\n\n \"\"\"\n self._call_endpoint(trail)\n\n def capture_custom(self, jsontrail: str):\n \"\"\"\n Capture custom JSON trail to endpoint\n\n Parameters\n ----------\n jsontrail : str\n custom JSON required for\n\n \"\"\"\n self._mark_json_trail(jsontrail)\n\n def endpoint_response(self) ->Response:\n \"\"\"\n access the response of the endpoint URL\n\n Returns\n --------\n Response\n Http response\n\n \"\"\"\n return self._resp\n\n def _set_response(self, resp: Response):\n self._resp = resp\n\n def _call_endpoint(self, trail: Trail):\n _resp = requests.post(self._url, json=trail.build_trail())\n if _resp.status_code is not 200:\n print(_resp.json())\n self._set_response(resp=_resp)\n\n def _mark_json_trail(self, jsontrail: str):\n _resp = requests.post(self._url, data=jsontrail)\n self._set_response(resp=_resp)\n",
"step-5": null,
"step-ids": [
8,
9,
10,
11
]
}
|
[
8,
9,
10,
11
] |
import sys
input = sys.stdin.readline
N = int(input())
A, B, C, D = [], [], [], []
for i in range(N):
a, b, c, d = map(int, input().split())
A.append(a)
B.append(b)
C.append(c)
D.append(d)
AB = []
CD = []
for i in range(N):
for j in range(N):
AB.append(A[i] + B[j])
CD.append(C[i] + D[j])
AB.sort()
CD.sort()
answer = 0
left, right = 0, len(CD) - 1
while left < len(AB) and right >= 0:
total = AB[left] + CD[right]
if total == 0:
left_count, right_count = 1, 1
left_tmp = left
left += 1
while left < len(AB) and AB[left] + CD[right] == 0:
left_count += 1
left += 1
right -= 1
while right >= 0 and AB[left_tmp] + CD[right] == 0:
right_count += 1
right -= 1
answer += (left_count * right_count)
elif total > 0:
right -= 1
else:
left += 1
print(answer)
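
# The code above is meet-in-the-middle: it materializes all N^2 pair sums
# A[i]+B[j] and C[i]+D[j], sorts both lists (O(N^2 log N)), then sweeps them
# with two pointers, counting runs of equal values so that duplicate sums are
# multiplied together instead of being re-scanned.
#
# A tiny sanity check: N = 1 with the single row "0 0 0 0" gives
# AB = CD = [0], so the sweep finds one zero-sum pair and prints 1.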
|
normal
|
{
"blob_id": "2a9426653146603d9aa79a59ce181d97aa3c551c",
"index": 8525,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(N):\n a, b, c, d = map(int, input().split())\n A.append(a)\n B.append(b)\n C.append(c)\n D.append(d)\n<mask token>\nfor i in range(N):\n for j in range(N):\n AB.append(A[i] + B[j])\n CD.append(C[i] + D[j])\nAB.sort()\nCD.sort()\n<mask token>\nwhile left < len(AB) and right >= 0:\n total = AB[left] + CD[right]\n if total == 0:\n left_count, right_count = 1, 1\n left_tmp = left\n left += 1\n while left < len(AB) and AB[left] + CD[right] == 0:\n left_count += 1\n left += 1\n right -= 1\n while right >= 0 and AB[left_tmp] + CD[right] == 0:\n right_count += 1\n right -= 1\n answer += left_count * right_count\n elif total > 0:\n right -= 1\n else:\n left += 1\nprint(answer)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nN = int(input())\nA, B, C, D = [], [], [], []\nfor i in range(N):\n a, b, c, d = map(int, input().split())\n A.append(a)\n B.append(b)\n C.append(c)\n D.append(d)\nAB = []\nCD = []\nfor i in range(N):\n for j in range(N):\n AB.append(A[i] + B[j])\n CD.append(C[i] + D[j])\nAB.sort()\nCD.sort()\nanswer = 0\nleft, right = 0, len(CD) - 1\nwhile left < len(AB) and right >= 0:\n total = AB[left] + CD[right]\n if total == 0:\n left_count, right_count = 1, 1\n left_tmp = left\n left += 1\n while left < len(AB) and AB[left] + CD[right] == 0:\n left_count += 1\n left += 1\n right -= 1\n while right >= 0 and AB[left_tmp] + CD[right] == 0:\n right_count += 1\n right -= 1\n answer += left_count * right_count\n elif total > 0:\n right -= 1\n else:\n left += 1\nprint(answer)\n",
"step-4": "import sys\ninput = sys.stdin.readline\nN = int(input())\nA, B, C, D = [], [], [], []\nfor i in range(N):\n a, b, c, d = map(int, input().split())\n A.append(a)\n B.append(b)\n C.append(c)\n D.append(d)\nAB = []\nCD = []\nfor i in range(N):\n for j in range(N):\n AB.append(A[i] + B[j])\n CD.append(C[i] + D[j])\nAB.sort()\nCD.sort()\nanswer = 0\nleft, right = 0, len(CD) - 1\nwhile left < len(AB) and right >= 0:\n total = AB[left] + CD[right]\n if total == 0:\n left_count, right_count = 1, 1\n left_tmp = left\n left += 1\n while left < len(AB) and AB[left] + CD[right] == 0:\n left_count += 1\n left += 1\n right -= 1\n while right >= 0 and AB[left_tmp] + CD[right] == 0:\n right_count += 1\n right -= 1\n answer += left_count * right_count\n elif total > 0:\n right -= 1\n else:\n left += 1\nprint(answer)\n",
"step-5": "import sys\ninput = sys.stdin.readline\n\nN = int(input())\nA, B, C, D = [], [], [], []\nfor i in range(N):\n a, b, c, d = map(int, input().split())\n A.append(a)\n B.append(b)\n C.append(c)\n D.append(d)\n\nAB = []\nCD = []\nfor i in range(N):\n for j in range(N):\n AB.append(A[i] + B[j])\n CD.append(C[i] + D[j])\n\nAB.sort()\nCD.sort()\n\nanswer = 0\nleft, right = 0, len(CD) - 1\nwhile left < len(AB) and right >= 0:\n total = AB[left] + CD[right]\n\n if total == 0: \n left_count, right_count = 1, 1\n left_tmp = left\n\n left += 1\n while left < len(AB) and AB[left] + CD[right] == 0:\n left_count += 1\n left += 1\n\n right -= 1\n while right >= 0 and AB[left_tmp] + CD[right] == 0:\n right_count += 1\n right -= 1\n \n answer += (left_count * right_count)\n\n elif total > 0:\n right -= 1\n else:\n left += 1\n\nprint(answer)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
import urllib.request as ulib
import json
from bs4 import BeautifulSoup as Bsoup
def find_links(name):
name = name.replace(" ", "+")
    url_str = 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' + \
              '&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}' + \
              '&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg' + \
              '.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s'
headers = {"User-Agent": "Chrome/65.0.3325.162 Safari/537.36", "Content-Type": "application/json"}
url_str = url_str.format(name, 0)
print(url_str)
request = ulib.Request(url_str, None, headers)
json_str = ulib.urlopen(request).read()
json_str = json.loads(json_str)
soup = Bsoup(json_str[1][1], 'lxml')
soup_imgs = soup.find_all("img")
img_links = [img["src"] for img in soup_imgs]
return img_links
def download_images(links, name):
dir_name = name.replace(" ", "_")
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
for i, img_link in enumerate(links):
img_path = os.path.join(dir_name, "{:06}.png".format(i))
ulib.urlretrieve(img_link, img_path)
if __name__ == "__main__":
search_str = "yoyo"
links = find_links(search_str)
download_images(links, search_str)
print("downloding images.... done!!!")
|
normal
|
{
"blob_id": "02ffdd1c03cc20883eddc691fc841022b4ff40fd",
"index": 1601,
"step-1": "<mask token>\n\n\ndef download_images(links, name):\n dir_name = name.replace(' ', '_')\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, '{:06}.png'.format(i))\n ulib.urlretrieve(img_link, img_path)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_links(name):\n name = name.replace(' ', '+')\n url_str = (\n 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' +\n '\\\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}'\n +\n '\\\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg'\n + '\\\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')\n headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36',\n 'Content-Type': 'application/json'}\n url_str = url_str.format(name, 0)\n print(url_str)\n request = ulib.Request(url_str, None, headers)\n json_str = ulib.urlopen(request).read()\n json_str = json.loads(json_str)\n soup = Bsoup(json_str[1][1], 'lxml')\n soup_imgs = soup.find_all('img')\n img_links = [img['src'] for img in soup_imgs]\n return img_links\n\n\ndef download_images(links, name):\n dir_name = name.replace(' ', '_')\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, '{:06}.png'.format(i))\n ulib.urlretrieve(img_link, img_path)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef find_links(name):\n name = name.replace(' ', '+')\n url_str = (\n 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' +\n '\\\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}'\n +\n '\\\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg'\n + '\\\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')\n headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36',\n 'Content-Type': 'application/json'}\n url_str = url_str.format(name, 0)\n print(url_str)\n request = ulib.Request(url_str, None, headers)\n json_str = ulib.urlopen(request).read()\n json_str = json.loads(json_str)\n soup = Bsoup(json_str[1][1], 'lxml')\n soup_imgs = soup.find_all('img')\n img_links = [img['src'] for img in soup_imgs]\n return img_links\n\n\ndef download_images(links, name):\n dir_name = name.replace(' ', '_')\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, '{:06}.png'.format(i))\n ulib.urlretrieve(img_link, img_path)\n\n\nif __name__ == '__main__':\n search_str = 'yoyo'\n links = find_links(search_str)\n download_images(links, search_str)\n print('downloding images.... done!!!')\n",
"step-4": "import os\nimport urllib.request as ulib\nimport json\nfrom bs4 import BeautifulSoup as Bsoup\n\n\ndef find_links(name):\n name = name.replace(' ', '+')\n url_str = (\n 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' +\n '\\\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}'\n +\n '\\\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg'\n + '\\\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s')\n headers = {'User-Agent': 'Chrome/65.0.3325.162 Safari/537.36',\n 'Content-Type': 'application/json'}\n url_str = url_str.format(name, 0)\n print(url_str)\n request = ulib.Request(url_str, None, headers)\n json_str = ulib.urlopen(request).read()\n json_str = json.loads(json_str)\n soup = Bsoup(json_str[1][1], 'lxml')\n soup_imgs = soup.find_all('img')\n img_links = [img['src'] for img in soup_imgs]\n return img_links\n\n\ndef download_images(links, name):\n dir_name = name.replace(' ', '_')\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, '{:06}.png'.format(i))\n ulib.urlretrieve(img_link, img_path)\n\n\nif __name__ == '__main__':\n search_str = 'yoyo'\n links = find_links(search_str)\n download_images(links, search_str)\n print('downloding images.... done!!!')\n",
"step-5": "import os\nimport urllib.request as ulib\nimport json\nfrom bs4 import BeautifulSoup as Bsoup\n\n\ndef find_links(name):\n name = name.replace(\" \", \"+\")\n\n url_str = 'https://www.google.com/search?ei=1m7NWePfFYaGmQG51q7IBg&hl=en&q={}' + \\\n '\\&tbm=isch&ved=0ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ&start={}' + \\\n '\\&yv=2&vet=10ahUKEwjjovnD7sjWAhUGQyYKHTmrC2kQuT0I7gEoAQ.1m7NWePfFYaGmQG51q7IBg' + \\\n '\\.i&ijn=1&asearch=ichunk&async=_id:rg_s,_pms:s'\n\n headers = {\"User-Agent\": \"Chrome/65.0.3325.162 Safari/537.36\", \"Content-Type\": \"application/json\"}\n url_str = url_str.format(name, 0)\n print(url_str)\n request = ulib.Request(url_str, None, headers)\n json_str = ulib.urlopen(request).read()\n json_str = json.loads(json_str)\n soup = Bsoup(json_str[1][1], 'lxml')\n soup_imgs = soup.find_all(\"img\")\n img_links = [img[\"src\"] for img in soup_imgs]\n return img_links\n\ndef download_images(links, name):\n dir_name = name.replace(\" \", \"_\")\n if not os.path.isdir(dir_name):\n os.mkdir(dir_name)\n\n for i, img_link in enumerate(links):\n img_path = os.path.join(dir_name, \"{:06}.png\".format(i))\n ulib.urlretrieve(img_link, img_path)\n\nif __name__ == \"__main__\":\n\n search_str = \"yoyo\"\n links = find_links(search_str)\n download_images(links, search_str)\n\n print(\"downloding images.... done!!!\")",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import re
from django import forms
from django.contrib.auth import password_validation
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.password_validation import validate_password
from .models import Account
class EditProfileModelForm(forms.ModelForm):
class Meta:
model = Account
fields = ['first_name', 'last_name', 'dob', 'email', 'email_confirmation', 'bio', 'avatar']
def clean(self, *args, **kwargs):
cleaned_data = super(EditProfileModelForm, self).clean()
email = self.cleaned_data.get('email')
email_confirmation = self.cleaned_data.get('email_confirmation')
if email and email_confirmation and email != email_confirmation:
raise forms.ValidationError("Emails do not match")
return cleaned_data
class PasswordChangeFormExt(PasswordChangeForm):
"""Form for changing user's password."""
def clean(self):
user = self.user
new_password = self.cleaned_data.get('new_password1')
old_password = self.cleaned_data.get('old_password')
validate_password(new_password, user)
if user.check_password(old_password):
if new_password == old_password:
raise forms.ValidationError("New password must be different than the old password")
if (user.first_name != "" and user.first_name.lower() in new_password.lower()
or user.last_name != "" and user.last_name.lower() in new_password.lower()):
raise forms.ValidationError("You cannot use personal information in your password")
if new_password.isupper() or new_password.islower():
raise forms.ValidationError("Password must contain uppercase and lowercase letters")
if re.match("^[a-zA-Z0-9]*$", new_password):
raise forms.ValidationError("Password must contain a special character")
return self.cleaned_data
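

# A minimal sketch of plugging PasswordChangeFormExt into Django's built-in
# password-change view; this would normally live in views.py. The template
# path is an assumption, and the success URL name assumes
# django.contrib.auth.urls is included in the project's URLconf.
from django.contrib.auth.views import PasswordChangeView
from django.urls import reverse_lazy


class PasswordChangeViewExt(PasswordChangeView):
    form_class = PasswordChangeFormExt
    template_name = 'accounts/password_change.html'  # hypothetical template path
    success_url = reverse_lazy('password_change_done')  # standard auth URL name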
|
normal
|
{
"blob_id": "af442d4a78930a0ebcd85a1cdfe4aa86461be5c1",
"index": 1274,
"step-1": "<mask token>\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n validate_password(new_password, user)\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\n 'New password must be different than the old password')\n if user.first_name != '' and user.first_name.lower(\n ) in new_password.lower(\n ) or user.last_name != '' and user.last_name.lower(\n ) in new_password.lower():\n raise forms.ValidationError(\n 'You cannot use personal information in your password')\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\n 'Password must contain uppercase and lowercase letters')\n if re.match('^[a-zA-Z0-9]*$', new_password):\n raise forms.ValidationError(\n 'Password must contain a special character')\n return self.cleaned_data\n",
"step-2": "<mask token>\n\n\nclass EditProfileModelForm(forms.ModelForm):\n\n\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'dob', 'email',\n 'email_confirmation', 'bio', 'avatar']\n <mask token>\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n validate_password(new_password, user)\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\n 'New password must be different than the old password')\n if user.first_name != '' and user.first_name.lower(\n ) in new_password.lower(\n ) or user.last_name != '' and user.last_name.lower(\n ) in new_password.lower():\n raise forms.ValidationError(\n 'You cannot use personal information in your password')\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\n 'Password must contain uppercase and lowercase letters')\n if re.match('^[a-zA-Z0-9]*$', new_password):\n raise forms.ValidationError(\n 'Password must contain a special character')\n return self.cleaned_data\n",
"step-3": "<mask token>\n\n\nclass EditProfileModelForm(forms.ModelForm):\n\n\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'dob', 'email',\n 'email_confirmation', 'bio', 'avatar']\n\n def clean(self, *args, **kwargs):\n cleaned_data = super(EditProfileModelForm, self).clean()\n email = self.cleaned_data.get('email')\n email_confirmation = self.cleaned_data.get('email_confirmation')\n if email and email_confirmation and email != email_confirmation:\n raise forms.ValidationError('Emails do not match')\n return cleaned_data\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n validate_password(new_password, user)\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\n 'New password must be different than the old password')\n if user.first_name != '' and user.first_name.lower(\n ) in new_password.lower(\n ) or user.last_name != '' and user.last_name.lower(\n ) in new_password.lower():\n raise forms.ValidationError(\n 'You cannot use personal information in your password')\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\n 'Password must contain uppercase and lowercase letters')\n if re.match('^[a-zA-Z0-9]*$', new_password):\n raise forms.ValidationError(\n 'Password must contain a special character')\n return self.cleaned_data\n",
"step-4": "import re\nfrom django import forms\nfrom django.contrib.auth import password_validation\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth.password_validation import validate_password\nfrom .models import Account\n\n\nclass EditProfileModelForm(forms.ModelForm):\n\n\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'dob', 'email',\n 'email_confirmation', 'bio', 'avatar']\n\n def clean(self, *args, **kwargs):\n cleaned_data = super(EditProfileModelForm, self).clean()\n email = self.cleaned_data.get('email')\n email_confirmation = self.cleaned_data.get('email_confirmation')\n if email and email_confirmation and email != email_confirmation:\n raise forms.ValidationError('Emails do not match')\n return cleaned_data\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n validate_password(new_password, user)\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\n 'New password must be different than the old password')\n if user.first_name != '' and user.first_name.lower(\n ) in new_password.lower(\n ) or user.last_name != '' and user.last_name.lower(\n ) in new_password.lower():\n raise forms.ValidationError(\n 'You cannot use personal information in your password')\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\n 'Password must contain uppercase and lowercase letters')\n if re.match('^[a-zA-Z0-9]*$', new_password):\n raise forms.ValidationError(\n 'Password must contain a special character')\n return self.cleaned_data\n",
"step-5": "import re\n\nfrom django import forms\nfrom django.contrib.auth import password_validation\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth.password_validation import validate_password\n\nfrom .models import Account\n\n\nclass EditProfileModelForm(forms.ModelForm):\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'dob', 'email', 'email_confirmation', 'bio', 'avatar']\n\n def clean(self, *args, **kwargs):\n cleaned_data = super(EditProfileModelForm, self).clean()\n email = self.cleaned_data.get('email')\n email_confirmation = self.cleaned_data.get('email_confirmation')\n if email and email_confirmation and email != email_confirmation:\n raise forms.ValidationError(\"Emails do not match\")\n return cleaned_data\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n\n validate_password(new_password, user)\n\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\"New password must be different than the old password\")\n\n if (user.first_name != \"\" and user.first_name.lower() in new_password.lower()\n or user.last_name != \"\" and user.last_name.lower() in new_password.lower()):\n raise forms.ValidationError(\"You cannot use personal information in your password\")\n\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\"Password must contain uppercase and lowercase letters\")\n\n if re.match(\"^[a-zA-Z0-9]*$\", new_password):\n raise forms.ValidationError(\"Password must contain a special character\")\n\n return self.cleaned_data\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import numpy as np
import pandas as pd


def get_analyse(curse):
    '''
    Expects `curse` to be a DataFrame of weekly net values (NAV): the index
    holds the dates, each column is one strategy, and each cell is that
    strategy's NAV. The 52s below assume weekly data.
    '''
    qf_drawdown = []
    qf_yeild = []
    qf_std = []
    date = curse.index
    y = curse.copy()
    for i in curse.columns:
        # Running maximum of the equity curve up to the current date
        y["max2here"] = y[i].expanding().max()
        # Fraction of the historical peak remaining on the current date
        y["dd2here"] = y[i] / y["max2here"]

        # The smallest remaining fraction gives the maximum drawdown;
        # the row where it occurs marks the drawdown's end date
        remain = y.sort_values(by="dd2here").iloc[0]["dd2here"]
        end_date = y.sort_values(by="dd2here").iloc[0]
        drawdown = round((1 - remain) * 100, 2)
        qf_drawdown.append(drawdown)
        daylenth = len(date) - 1
        # Annualized return, assuming the NAV series starts at 1
        yeild = round(((y[i][daylenth]) ** (52 / daylenth) - 1) * 100, 2)
        qf_yeild.append(yeild)
        y1 = y[i]
        r1 = y1 / y1.shift(1) - 1
        # Annualized volatility of the weekly returns
        std = round(np.nanstd(r1) * 52 ** 0.5 * 100, 2)
        qf_std.append(std)
    # Column labels are kept in the original Chinese: 最大回撤 = max drawdown,
    # 年化收益率 = annualized return, 年波动率 = annual volatility, 夏普比率 = Sharpe ratio
    drawdown = pd.DataFrame(qf_drawdown, index=curse.columns, columns=["最大回撤"])
    drawdown["年化收益率"] = qf_yeild
    drawdown["Calmar比率"] = drawdown["年化收益率"] / drawdown["最大回撤"]
    drawdown["年波动率"] = qf_std
    drawdown["夏普比率"] = drawdown["年化收益率"] / drawdown["年波动率"]
    return drawdown
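
# A minimal usage sketch (hypothetical data, not from the original source).
# Assumes weekly NAV series, matching the 52-period annualization above, a
# pandas version where `series[int]` on a datetime index still falls back to
# positional lookup (which get_analyse itself relies on), and NumPy >= 1.17
# for default_rng.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(0)
    dates = pd.date_range('2020-01-03', periods=53, freq='W')
    curves = pd.DataFrame({'strat_a': np.cumprod(1 + rng.normal(0.002, 0.01, 53)),
                           'strat_b': np.cumprod(1 + rng.normal(0.001, 0.02, 53))},
                          index=dates)
    print(get_analyse(curves))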
|
normal
|
{
"blob_id": "56d90835e64bd80fd9a6bb3a9b414e154d314d4a",
"index": 5108,
"step-1": "<mask token>\n",
"step-2": "def get_analyse(curse):\n \"\"\"\n 要求curse数据中index为时间,columns为策略名称,每一列为该策略净值\n\n \"\"\"\n qf_drawdown = []\n qf_yeild = []\n qf_std = []\n date = curse.index\n y = curse.copy()\n for i in curse.columns:\n y['max2here'] = y[i].expanding().max()\n y['dd2here'] = y[i] / y['max2here']\n remain = y.sort_values(by='dd2here').iloc[0]['dd2here']\n end_date = y.sort_values(by='dd2here').iloc[0]\n drawdown = round((1 - remain) * 100, 2)\n qf_drawdown.append(drawdown)\n daylenth = len(date) - 1\n yeild = round((y[i][daylenth] ** (52 / daylenth) - 1) * 100, 2)\n qf_yeild.append(yeild)\n y1 = y[i]\n r1 = y1 / y1.shift(1) - 1\n std = round(np.nanstd(r1) * 52 ** 0.5 * 100, 2)\n qf_std.append(std)\n drawdown = pd.DataFrame(qf_drawdown, index=curse.columns, columns=['最大回撤'])\n drawdown['年化收益率'] = qf_yeild\n drawdown['Calmar比率'] = drawdown['年化收益率'] / drawdown['最大回撤']\n drawdown['年波动率'] = qf_std\n drawdown['夏普比率'] = drawdown['年化收益率'] / drawdown['年波动率']\n return drawdown\n",
"step-3": "\r\ndef get_analyse(curse):\r\n '''\r\n 要求curse数据中index为时间,columns为策略名称,每一列为该策略净值\r\n\r\n '''\r\n qf_drawdown = []\r\n qf_yeild = []\r\n qf_std = []\r\n date = curse.index\r\n y = curse.copy()\r\n for i in curse.columns:\r\n # 计算当前日之前的资金曲线最高点\r\n y[\"max2here\"] = y[i].expanding().max()\r\n # 计算历史最高值到当日的剩余量\r\n y[\"dd2here\"] = y[i] / y[\"max2here\"]\r\n\r\n # 计算完回撤后剩余量的最小值(即最大回撤的剩余量),以及最大回撤的结束时间\r\n remain = y.sort_values(by=\"dd2here\").iloc[0][\"dd2here\"]\r\n end_date = y.sort_values(by=\"dd2here\").iloc[0]\r\n drawdown = round((1 - remain) * 100, 2)\r\n qf_drawdown.append(drawdown)\r\n daylenth = len(date) - 1\r\n yeild = round(((y[i][daylenth]) ** (52 / daylenth) - 1) * 100, 2)\r\n qf_yeild.append(yeild)\r\n y1 = y[i]\r\n r1 = y1 / y1.shift(1) - 1\r\n std = round(np.nanstd(r1) * 52 ** 0.5 * 100, 2)\r\n qf_std.append(std)\r\n drawdown = pd.DataFrame(qf_drawdown, index=curse.columns, columns=[\"最大回撤\"])\r\n drawdown[\"年化收益率\"] = qf_yeild\r\n drawdown[\"Calmar比率\"] = drawdown[\"年化收益率\"] / drawdown[\"最大回撤\"]\r\n drawdown[\"年波动率\"] = qf_std\r\n drawdown[\"夏普比率\"] = drawdown[\"年化收益率\"] / drawdown[\"年波动率\"]\r\n return drawdown",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Given an input with the following format
# x y yerr
# on standard input, print a fit of y = ax+b
# to the data.
import sys, string
from math import sqrt
# Components of the weighted least-squares normal equations
# (chi-square fit of y = a1*x + a0 with weights 1/yerr^2).

def cP(X, Yerr):
    # sum of x_i^2 / sigma_i^2
    sum = 0
    for i in range(len(X)):
        sum = sum + (X[i]*X[i])/(Yerr[i]*Yerr[i])
    return sum

def cQ(Yerr):
    # sum of 1 / sigma_i^2
    sum = 0
    for i in range(len(Yerr)):
        sum = sum + 1/(Yerr[i]*Yerr[i])
    return sum

def cR(X, Yerr):
    # sum of x_i / sigma_i^2
    sum = 0
    for i in range(len(X)):
        sum = sum + X[i]/(Yerr[i]*Yerr[i])
    return sum

def cS(X, Y, Yerr):
    # sum of x_i * y_i / sigma_i^2
    sum = 0
    for i in range(len(X)):
        sum = sum + (X[i]*Y[i])/(Yerr[i]*Yerr[i])
    return sum

def cT(Y, Yerr):
    # sum of y_i / sigma_i^2
    sum = 0
    for i in range(len(Y)):
        sum = sum + Y[i]/(Yerr[i]*Yerr[i])
    return sum
def stdin2lists():
X = []
Y = []
Yerr = []
while True:
try:
r = raw_input('')
line = string.split(r)
if len(r) == 0 or r[0] == '#':
continue
f = map(lambda x: float(x), line)
X.append(f[0])
Y.append(f[1])
Yerr.append(f[2])
except EOFError:
break
return [X, Y, Yerr]
data = stdin2lists()
P = cP(data[0], data[2])
Q = cQ(data[2])
R = cR(data[0], data[2])
S = cS(data[0], data[1], data[2])
T = cT(data[1], data[2])
a1 = (Q*S - R*T)/(P*Q - R*R)   # slope a
a0 = (P*T - R*S)/(P*Q - R*R)   # intercept b
ea1 = sqrt(Q/(P*Q - R*R))      # 1-sigma uncertainty on the slope
ea0 = sqrt(P/(P*Q - R*R))      # 1-sigma uncertainty on the intercept
print "{:e} ± {:e}".format(a1, ea1)
print "{:e} ± {:e}".format(a0, ea0)
|
normal
|
{
"blob_id": "e78504971c51a98eed60ea8032502b6ce1a11f29",
"index": 4206,
"step-1": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Given an input with the following format\n# x y yerr\n# on standard input, print a fit of y = ax+b\n# to the data.\n\nimport sys, string\nfrom math import sqrt\n\ndef cP(X, Yerr):\n\tsum = 0\n\tfor i in range(len(X)):\n\t\tsum = sum + (X[i]*X[i])/(Yerr[i]*Yerr[i])\n\treturn sum\n\ndef cQ(Yerr):\n\tsum = 0\n\tfor i in range(len(Yerr)):\n\t\tsum = sum + 1/(Yerr[i]*Yerr[i])\n\treturn sum\n\ndef cR(X, Yerr):\n\tsum = 0\n\tfor i in range(len(X)):\n\t\tsum = sum + X[i]/(Yerr[i]*Yerr[i])\n\treturn sum\n\ndef cS(X, Y, Yerr):\n\tsum = 0\n\tfor i in range(len(X)):\n\t\tsum = sum + (X[i]*Y[i])/(Yerr[i]*Yerr[i])\n\treturn sum\n\ndef cT(Y, Yerr):\n\tsum = 0\n\tfor i in range(len(Y)):\n\t\tsum = sum + Y[i]/(Yerr[i]*Yerr[i])\n\treturn sum\n\n\ndef stdin2lists():\n\tX = []\n\tY = []\n\tYerr = []\n\n\twhile True:\n\t\ttry:\n\t\t\tr = raw_input('')\n\t\t\tline = string.split(r)\n\t\t\tif len(r) == 0 or r[0] == '#':\n\t\t\t\tcontinue\n\t\t\tf = map(lambda x: float(x), line)\n\t\t\tX.append(f[0])\n\t\t\tY.append(f[1])\n\t\t\tYerr.append(f[2])\n\t\texcept EOFError:\n\t\t\tbreak\n\treturn [X, Y, Yerr]\n\ndata = stdin2lists()\nP = cP(data[0], data[2])\nQ = cQ(data[2])\nR = cR(data[0], data[2])\nS = cS(data[0], data[1], data[2])\nT = cT(data[1], data[2])\n\na1 = (Q*S - R*T)/(P*Q - R*R)\na0 = (P*T - R*S)/(P*Q - R*R)\nea1 = sqrt(Q/(P*Q - R*R))\nea0 = sqrt(P/(P*Q - R*R))\n\nprint \"{:e} ± {:e}\".format(a1, ea1)\nprint \"{:e} ± {:e}\".format(a0, ea0)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#https://www.geeksforgeeks.org/count-of-substrings-of-length-k-with-exactly-k-distinct-characters/
#https://www.geeksforgeeks.org/count-number-of-substrings-with-exactly-k-distinct-characters/
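
# A minimal sketch (function names are mine) of the standard sliding-window
# technique behind the second link: the number of substrings with *exactly*
# k distinct characters equals atMost(k) - atMost(k - 1).
from collections import defaultdict


def at_most_k_distinct(s, k):
    """Count substrings of s with at most k distinct characters."""
    if k < 0:
        return 0
    counts = defaultdict(int)
    left = result = 0
    for right, ch in enumerate(s):
        counts[ch] += 1
        while len(counts) > k:
            counts[s[left]] -= 1
            if counts[s[left]] == 0:
                del counts[s[left]]
            left += 1
        # every substring ending at `right` and starting in [left, right]
        result += right - left + 1
    return result


def exactly_k_distinct(s, k):
    return at_most_k_distinct(s, k) - at_most_k_distinct(s, k - 1)


# exactly_k_distinct('pqpqs', 2) -> 7  (pq, qp, pq, qs, pqp, qpq, pqpq)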
|
normal
|
{
"blob_id": "2ca40a53291a62bbdb4386decc5a2dfa84431836",
"index": 6630,
"step-1": "#https://www.geeksforgeeks.org/count-of-substrings-of-length-k-with-exactly-k-distinct-characters/\n#https://www.geeksforgeeks.org/count-number-of-substrings-with-exactly-k-distinct-characters/\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
from unittest import TestCase, main as unittest_main, mock
from app import app
from bson.objectid import ObjectId
'''
Dummy data used to test the create, update, and delete routes.
Inspiration taken from the Playlister tutorial.
'''
sample_offer_id = ObjectId('5349b4ddd2781d08c09890f4')
sample_offer = {
'name': 'Muhammad Ali',
'offer': '4500',
'email': '[email protected]',
'location': 'Fort Worth, TX'
}
sample_form_data = {
'name': sample_offer['name'],
'offer': sample_offer['offer'],
'email': sample_offer['email'],
'location': sample_offer['location']
}
class HomelyTests(TestCase):
"""Flask tests."""
def setUp(self):
"""Get Flask test client."""
self.client = app.test_client()
# Show Flask errors that happen during tests
app.config['TESTING'] = True
def test_properties_index(self):
"""Test the properties homepage."""
result = self.client.get('/')
self.assertEqual(result.status, '200 OK')
self.assertIn(b'Welcome', result.data)
def test_offers_new(self):
"""Test the new offer creation page."""
result = self.client.get('/offers_new')
self.assertEqual(result.status, '200 OK')
self.assertIn(b'Make an Offer', result.data)
def test_offers_show_every(self):
"""Test showing the page of all offers."""
result = self.client.get('/offers_show_every')
self.assertEqual(result.status, '200 OK')
self.assertIn(b'Offers', result.data)
@mock.patch('pymongo.collection.Collection.insert_one')
def test_submit_offer(self, mock_insert):
"""Test submitting a new offer. Entry point for route
is called offers_show_all.
"""
result = self.client.post('offers_show', data=sample_form_data)
# After submitting, should redirect to the offers_show page.
self.assertEqual(result.status, '302 FOUND')
mock_insert.assert_called_with(sample_offer)
@mock.patch('pymongo.collection.Collection.find_one')
def test_show_offer(self, mock_find):
"""Test showing a single offer."""
mock_find.return_value = sample_offer
result = self.client.get(f'/offers/{sample_offer_id}')
self.assertEqual(result.status, '200 OK')
self.assertIn(b'Description', result.data)
@mock.patch('pymongo.collection.Collection.find_one')
def test_offers_edit(self, mock_find):
"""Test rendering of the edit offer form."""
mock_find.return_value = sample_offer
result = self.client.get(f'/offers/{sample_offer_id}/edit')
self.assertEqual(result.status, '200 OK')
self.assertIn(b'Edit This Offer', result.data)
@mock.patch('pymongo.collection.Collection.find_one')
def test_edit_offer(self, mock_find):
"""Test submitted an edited offer."""
mock_find.return_value = sample_offer
result = self.client.get(f'/offers/{sample_offer_id}')
self.assertEqual(result.status, '200 OK')
self.assertIn(b'Description', result.data)
@mock.patch('pymongo.collection.Collection.delete_one')
def test_offers_delete(self, mock_delete):
"""Test deletion of an offer."""
form_data = {'_method': 'DELETE'}
result = self.client.post(f'/offers/{sample_offer_id}/delete',
data=form_data)
self.assertEqual(result.status, '302 FOUND')
mock_delete.assert_called_with({'_id': sample_offer_id})
if __name__ == '__main__':
unittest_main()
|
normal
|
{
"blob_id": "cef6b5ef2082dc5910806550d9a9c96357752baf",
"index": 3541,
"step-1": "<mask token>\n\n\nclass HomelyTests(TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\"Get Flask test client.\"\"\"\n self.client = app.test_client()\n app.config['TESTING'] = True\n\n def test_properties_index(self):\n \"\"\"Test the properties homepage.\"\"\"\n result = self.client.get('/')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Welcome', result.data)\n <mask token>\n\n def test_offers_show_every(self):\n \"\"\"Test showing the page of all offers.\"\"\"\n result = self.client.get('/offers_show_every')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Offers', result.data)\n <mask token>\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_show_offer(self, mock_find):\n \"\"\"Test showing a single offer.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n <mask token>\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_edit_offer(self, mock_find):\n \"\"\"Test submitted an edited offer.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n\n @mock.patch('pymongo.collection.Collection.delete_one')\n def test_offers_delete(self, mock_delete):\n \"\"\"Test deletion of an offer.\"\"\"\n form_data = {'_method': 'DELETE'}\n result = self.client.post(f'/offers/{sample_offer_id}/delete', data\n =form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_delete.assert_called_with({'_id': sample_offer_id})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass HomelyTests(TestCase):\n <mask token>\n\n def setUp(self):\n \"\"\"Get Flask test client.\"\"\"\n self.client = app.test_client()\n app.config['TESTING'] = True\n\n def test_properties_index(self):\n \"\"\"Test the properties homepage.\"\"\"\n result = self.client.get('/')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Welcome', result.data)\n\n def test_offers_new(self):\n \"\"\"Test the new offer creation page.\"\"\"\n result = self.client.get('/offers_new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Make an Offer', result.data)\n\n def test_offers_show_every(self):\n \"\"\"Test showing the page of all offers.\"\"\"\n result = self.client.get('/offers_show_every')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Offers', result.data)\n\n @mock.patch('pymongo.collection.Collection.insert_one')\n def test_submit_offer(self, mock_insert):\n \"\"\"Test submitting a new offer. Entry point for route\n is called offers_show_all.\n \"\"\"\n result = self.client.post('offers_show', data=sample_form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_insert.assert_called_with(sample_offer)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_show_offer(self, mock_find):\n \"\"\"Test showing a single offer.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_offers_edit(self, mock_find):\n \"\"\"Test rendering of the edit offer form.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}/edit')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Edit This Offer', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_edit_offer(self, mock_find):\n \"\"\"Test submitted an edited offer.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n\n @mock.patch('pymongo.collection.Collection.delete_one')\n def test_offers_delete(self, mock_delete):\n \"\"\"Test deletion of an offer.\"\"\"\n form_data = {'_method': 'DELETE'}\n result = self.client.post(f'/offers/{sample_offer_id}/delete', data\n =form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_delete.assert_called_with({'_id': sample_offer_id})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass HomelyTests(TestCase):\n \"\"\"Flask tests.\"\"\"\n\n def setUp(self):\n \"\"\"Get Flask test client.\"\"\"\n self.client = app.test_client()\n app.config['TESTING'] = True\n\n def test_properties_index(self):\n \"\"\"Test the properties homepage.\"\"\"\n result = self.client.get('/')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Welcome', result.data)\n\n def test_offers_new(self):\n \"\"\"Test the new offer creation page.\"\"\"\n result = self.client.get('/offers_new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Make an Offer', result.data)\n\n def test_offers_show_every(self):\n \"\"\"Test showing the page of all offers.\"\"\"\n result = self.client.get('/offers_show_every')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Offers', result.data)\n\n @mock.patch('pymongo.collection.Collection.insert_one')\n def test_submit_offer(self, mock_insert):\n \"\"\"Test submitting a new offer. Entry point for route\n is called offers_show_all.\n \"\"\"\n result = self.client.post('offers_show', data=sample_form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_insert.assert_called_with(sample_offer)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_show_offer(self, mock_find):\n \"\"\"Test showing a single offer.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_offers_edit(self, mock_find):\n \"\"\"Test rendering of the edit offer form.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}/edit')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Edit This Offer', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_edit_offer(self, mock_find):\n \"\"\"Test submitted an edited offer.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n\n @mock.patch('pymongo.collection.Collection.delete_one')\n def test_offers_delete(self, mock_delete):\n \"\"\"Test deletion of an offer.\"\"\"\n form_data = {'_method': 'DELETE'}\n result = self.client.post(f'/offers/{sample_offer_id}/delete', data\n =form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_delete.assert_called_with({'_id': sample_offer_id})\n\n\n<mask token>\n",
"step-4": "from unittest import TestCase, main as unittest_main, mock\nfrom app import app\nfrom bson.objectid import ObjectId\n<mask token>\nsample_offer_id = ObjectId('5349b4ddd2781d08c09890f4')\nsample_offer = {'name': 'Muhammad Ali', 'offer': '4500', 'email':\n '[email protected]', 'location': 'Fort Worth, TX'}\nsample_form_data = {'name': sample_offer['name'], 'offer': sample_offer[\n 'offer'], 'email': sample_offer['email'], 'location': sample_offer[\n 'location']}\n\n\nclass HomelyTests(TestCase):\n \"\"\"Flask tests.\"\"\"\n\n def setUp(self):\n \"\"\"Get Flask test client.\"\"\"\n self.client = app.test_client()\n app.config['TESTING'] = True\n\n def test_properties_index(self):\n \"\"\"Test the properties homepage.\"\"\"\n result = self.client.get('/')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Welcome', result.data)\n\n def test_offers_new(self):\n \"\"\"Test the new offer creation page.\"\"\"\n result = self.client.get('/offers_new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Make an Offer', result.data)\n\n def test_offers_show_every(self):\n \"\"\"Test showing the page of all offers.\"\"\"\n result = self.client.get('/offers_show_every')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Offers', result.data)\n\n @mock.patch('pymongo.collection.Collection.insert_one')\n def test_submit_offer(self, mock_insert):\n \"\"\"Test submitting a new offer. Entry point for route\n is called offers_show_all.\n \"\"\"\n result = self.client.post('offers_show', data=sample_form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_insert.assert_called_with(sample_offer)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_show_offer(self, mock_find):\n \"\"\"Test showing a single offer.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_offers_edit(self, mock_find):\n \"\"\"Test rendering of the edit offer form.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}/edit')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Edit This Offer', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_edit_offer(self, mock_find):\n \"\"\"Test submitted an edited offer.\"\"\"\n mock_find.return_value = sample_offer\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n\n @mock.patch('pymongo.collection.Collection.delete_one')\n def test_offers_delete(self, mock_delete):\n \"\"\"Test deletion of an offer.\"\"\"\n form_data = {'_method': 'DELETE'}\n result = self.client.post(f'/offers/{sample_offer_id}/delete', data\n =form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_delete.assert_called_with({'_id': sample_offer_id})\n\n\nif __name__ == '__main__':\n unittest_main()\n",
"step-5": "from unittest import TestCase, main as unittest_main, mock\nfrom app import app\nfrom bson.objectid import ObjectId\n\n'''\ndummy data to use in testing create, update, and delete routes\n(U and D not yet made)\nInspiration taken from Playlister tutorial.\n'''\nsample_offer_id = ObjectId('5349b4ddd2781d08c09890f4')\nsample_offer = {\n 'name': 'Muhammad Ali',\n 'offer': '4500',\n 'email': '[email protected]',\n 'location': 'Fort Worth, TX'\n}\nsample_form_data = {\n 'name': sample_offer['name'],\n 'offer': sample_offer['offer'],\n 'email': sample_offer['email'],\n 'location': sample_offer['location']\n}\n\n\nclass HomelyTests(TestCase):\n \"\"\"Flask tests.\"\"\"\n\n def setUp(self):\n \"\"\"Get Flask test client.\"\"\"\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True\n\n def test_properties_index(self):\n \"\"\"Test the properties homepage.\"\"\"\n result = self.client.get('/')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Welcome', result.data)\n\n def test_offers_new(self):\n \"\"\"Test the new offer creation page.\"\"\"\n result = self.client.get('/offers_new')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Make an Offer', result.data)\n\n def test_offers_show_every(self):\n \"\"\"Test showing the page of all offers.\"\"\"\n result = self.client.get('/offers_show_every')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Offers', result.data)\n\n @mock.patch('pymongo.collection.Collection.insert_one')\n def test_submit_offer(self, mock_insert):\n \"\"\"Test submitting a new offer. Entry point for route\n is called offers_show_all.\n \"\"\"\n result = self.client.post('offers_show', data=sample_form_data)\n\n # After submitting, should redirect to the offers_show page.\n self.assertEqual(result.status, '302 FOUND')\n mock_insert.assert_called_with(sample_offer)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_show_offer(self, mock_find):\n \"\"\"Test showing a single offer.\"\"\"\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_offers_edit(self, mock_find):\n \"\"\"Test rendering of the edit offer form.\"\"\"\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}/edit')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Edit This Offer', result.data)\n\n @mock.patch('pymongo.collection.Collection.find_one')\n def test_edit_offer(self, mock_find):\n \"\"\"Test submitted an edited offer.\"\"\"\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)\n\n @mock.patch('pymongo.collection.Collection.delete_one')\n def test_offers_delete(self, mock_delete):\n \"\"\"Test deletion of an offer.\"\"\"\n form_data = {'_method': 'DELETE'}\n result = self.client.post(f'/offers/{sample_offer_id}/delete',\n data=form_data)\n self.assertEqual(result.status, '302 FOUND')\n mock_delete.assert_called_with({'_id': sample_offer_id})\n\n\nif __name__ == '__main__':\n unittest_main()\n",
"step-ids": [
7,
10,
11,
14,
15
]
}
|
[
7,
10,
11,
14,
15
] |
# http://www.dalkescientific.com/writings/diary/archive/2007/10/07/wide_finder.html
'''
Making a faster standard library approach
As I was writing an email to Fredrik describing these results,
I came up with another approach to speeding up the performance, using only the standard library.
Fredrik showed that using a two-level filter, with a quick exclusion test using string operations followed by the regular expression test,
was faster than doing only the regular expression test. Quoting him:
The RE engine does indeed use special code for literal prefixes,
but the superlinear substring search algorithm that was introduced in 2.5 is a lot faster in cases like this, so this simple change gives a noticeable speedup.
This works because only about 20% of the lines in the input file match the quick test, and the simple string test is
% python -m timeit -s 's="This is a test. I was here."*4; t="testXYZ"' 't in s'
10000000 loops, best of 3: 0.194 usec per loop
% python -m timeit -s 'import re;s="This is a test. I was here."*4; t=re.compile("testXYZ")' 't.search(s)'
1000000 loops, best of 3: 0.98 usec per loop
% python -c 'print 0.98/0.194'
5.05154639175
%
roughly 5 times faster than the regular expression test.
My observation was that I can defer the regular expression test until later.
Use the quick string test to find all substrings starting with "GET /ongoing/When/" and ending with the " ".
This will include some extra substrings. Tally all of the substrings, including the false positives.
This will do extra work but the tallying code is very fast.
Once the file has been parsed, post-process the counts dictionary and remove those keys which are not allowed by the regular expression.
This works because there are many duplicate keys. Nearly 50% of the entries which pass the quick string test are duplicates.
The keys in the counts dictionary are unique, which mean only one regular expression test needs to be done, instead of one for each match.
If most of the entries were under /ongoing/When/ and most were unique then these optimizations would be a net slowdown.
You have to understand your data as well as the software in order to figure out how to improve things, and there will be tradeoffs.
Remember also I mentioned that string operations are available for buffer objects?
This means I can do the fast find directly on the memory-mapped file, rather than using a chunk reader.
I'll do the quick search for the leading part of the pattern to search for, then another search for the trailing " " (space) character.
'''
# dalke-wf-10.py fast string ops, mmap, post-process filter
import re, os, mmap
from collections import defaultdict
FILE = "o1000k.ap"
import time, sys
if sys.platform == "win32":
timer = time.clock
else:
timer = time.time
t0, t1 = timer(), time.clock()
pat = re.compile(r"GET /ongoing/When/\d\d\dx/(\d\d\d\d/\d\d/\d\d/[^ .]+) ")
search = pat.search
def count_file(filename):
count = defaultdict(int)
fileobj = open(FILE)
filemap = mmap.mmap(fileobj.fileno(), os.path.getsize(FILE), access=mmap.ACCESS_READ)
i = j = 0
# For the first pass, including everything which is a reasonable match.
# It's faster to count everything and filter later than it is to do
# the filtering now.
while 1:
i = filemap.find("GET /ongoing/When/", j)
if i == -1:
break
j = filemap.find(' ', i+19)
field = filemap[i:j]
count[field] += 1
# The previous code included fields which aren't allowed by the
# regular expression. Filter those which don't match the regexp.
new_count = {}
for k, v in count.iteritems():
# because of the way the key was saved, I didn't keep the
# trailing space. Add it back here so the regexp can be used unchanged.
k = k + " "
m = pat.search(k)
if m:
new_count[m.group(1)] = v
return new_count
count = count_file(FILE)
for key in sorted(count, key=count.get)[:10]:
pass # print "%40s = %s" % (key, count[key])
print timer() - t0, time.clock() - t1
# sanity check
for key in sorted(count, key=count.get)[-10:]:
print "%40s = %s" % (key, count[key])
'''
Variable lookups in module scope are slower than lookups in local scope, so I introduced the count_file function to get a bit more speed.
I didn't generate numbers for this one but experience says it's nearly always a performance advantage.
The resulting dalke-wf-10 code finishes in 1.0s. Yes, you read that right. It's faster than the mmap/findall solution of dalke-wf-7.py, which took 1.3s.
Still not as fast as mxTextTools at 0.7s, but this solution uses only the standard library.
'''
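
# A small demonstration (mine, not from Dalke's post) of the local- versus
# module-scope point above: local names compile to LOAD_FAST, while globals
# go through the slower LOAD_GLOBAL dictionary lookup on every access.
import timeit

g = 1

def use_global():
    s = 0
    for _ in xrange(1000):
        s += g          # LOAD_GLOBAL each iteration
    return s

def use_local(l=1):
    s = 0
    for _ in xrange(1000):
        s += l          # LOAD_FAST each iteration
    return s

print "global lookups:", timeit.timeit(use_global, number=1000)
print "local lookups: ", timeit.timeit(use_local, number=1000)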
|
normal
|
{
"blob_id": "734fd4c492f2fd31a0459e90e5c4a7468120b4cd",
"index": 2369,
"step-1": "# http://www.dalkescientific.com/writings/diary/archive/2007/10/07/wide_finder.html\n'''\nMaking a faster standard library approach\n\nAs I was writing an email to Fredrik describing these results,\nI came up with another approach to speeding up the performance, using only the standard library.\n\nFredrik showed that using a two-level filter, with a quick exclusion test using string operations followed by the regular expression test,\nwas faster than doing only the regular expression test. Quoting him:\n\nThe RE engine does indeed use special code for literal prefixes,\nbut the superlinear substring search algorithm that was introduced in 2.5 is a lot faster in cases like this, so this simple change gives a noticable speedup.\nThis works because the only about 20% of the lines in the input file matches the quick test and the simple string test is\n\n% python -m timeit -s 's=\"This is a test. I was here.\"*4; t=\"testXYZ\"' 't in s'\n10000000 loops, best of 3: 0.194 usec per loop\n% python -m timeit -s 'import re;s=\"This is a test. I was here.\"*4; t=re.compile(\"testXYZ\")' 't.search(s)'\n1000000 loops, best of 3: 0.98 usec per loop\n% python -c 'print 0.98/0.194'\n5.05154639175\n%\n\nroughly 5 times faster than the regular expression test.\nMy observation was that I can defer the regular expression test until later.\nUse the quick string test to find all substrings starting with \"GET /ongoing/When/\" and ending with the \" \".\nThis will include some extra substrings. Tally all of the substrings, including the false positives.\nThis will do extra work but the tallying code is very fast.\nOnce the file has been parsed, post-process the counts dictionary and remove those keys which are not allowed by the regular expression.\n\nThis works because there are many duplicate keys. 
Nearly 50% of the entries which pass the quick string test are duplicates.\nThe keys in the counts dictionary are unique, which mean only one regular expression test needs to be done, instead of one for each match.\n\nIf most of the entries were under /ongoing/When/ and most were unique then these optimizations would be a net slowdown.\nYou have to understand your data as well as the software in order to figure out how to improve things, and there will be tradeoffs.\n\nRemember also I mentioned that string operations are available for buffer objects?\nThis means I can do the fast find directly on the memory-mapped file, rather than using a chunk reader.\nI'll do the quick search for the leading part of the pattern to search for, then another search for the trailing \" \" (space) character.\n'''\n\n# dalke-wf-10.py fast string ops, mmap, post-process filter\nimport re, os, mmap\nfrom collections import defaultdict\n\nFILE = \"o1000k.ap\"\n\nimport time, sys\nif sys.platform == \"win32\":\n timer = time.clock\nelse:\n timer = time.time\n\nt0, t1 = timer(), time.clock()\n\npat = re.compile(r\"GET /ongoing/When/\\d\\d\\dx/(\\d\\d\\d\\d/\\d\\d/\\d\\d/[^ .]+) \")\nsearch = pat.search\n\n\ndef count_file(filename):\n count = defaultdict(int)\n fileobj = open(FILE)\n filemap = mmap.mmap(fileobj.fileno(), os.path.getsize(FILE), access=mmap.ACCESS_READ)\n i = j = 0\n # For the first pass, including everything which is a reasonable match.\n # It's faster to count everything and filter later than it is to do\n # the filtering now.\n while 1:\n i = filemap.find(\"GET /ongoing/When/\", j)\n if i == -1:\n break\n j = filemap.find(' ', i+19)\n field = filemap[i:j]\n count[field] += 1\n\n # The previous code included fields which aren't allowed by the\n # regular expression. Filter those which don't match the regexp.\n new_count = {}\n for k, v in count.iteritems():\n # because of the way the key was saved, I didn't keep the\n # trailing space. Add it back here so the regexp can be used unchanged.\n k = k + \" \"\n m = pat.search(k)\n if m:\n new_count[m.group(1)] = v\n return new_count\n\n\ncount = count_file(FILE)\n\nfor key in sorted(count, key=count.get)[:10]:\n pass # print \"%40s = %s\" % (key, count[key])\n\nprint timer() - t0, time.clock() - t1\n\n# sanity check\nfor key in sorted(count, key=count.get)[-10:]:\n print \"%40s = %s\" % (key, count[key])\n\n'''\nVariable lookups in module scope are slower than lookups in local scope so I introduced the count_file function to get a bit more speed.\nI didn't generate numbers for this one but experience says it's nearly always a performance advantage.\n\nThe resulting dalke-wf-10 code finishes in 1.0s. Yes, you read that right. It's faster than the mmap/findall solution of dalke-wf-7.py, which took 1.3s.\nStill not as fast as mxTextTools at 0.7s, but this solution uses only the standard library.\n'''",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
class FieldDesigner:
"""
Designs a field for BattleShips, accepts field height and width
"""
    def __init__(self):
        self.field = []

    def design_field(self, height, width):
        # NOTE: the outer comprehension runs over `width`, so this builds
        # `width` rows of `height` cells each
        self.field = [['~' for __ in range(height)]
                      for __ in range(width)]
        return self.field

    def __str__(self):
        return '\n'.join(map(str, self.field))
|
normal
|
{
"blob_id": "c812419e7e024b0bb1207832b2b4a726ef61b272",
"index": 9137,
"step-1": "class FieldDesigner:\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-2": "class FieldDesigner:\n <mask token>\n\n def __init__(self):\n self.field = []\n <mask token>\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-3": "class FieldDesigner:\n <mask token>\n\n def __init__(self):\n self.field = []\n\n def design_field(self, height, width):\n self.field = [['~' for __ in range(height)] for __ in range(width)]\n return self.field\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-4": "class FieldDesigner:\n \"\"\"\n Designs a field for BattleShips, accepts field height and width\n \"\"\"\n\n def __init__(self):\n self.field = []\n\n def design_field(self, height, width):\n self.field = [['~' for __ in range(height)] for __ in range(width)]\n return self.field\n\n def __str__(self):\n return '\\n'.join(map(str, self.field))\n",
"step-5": "class FieldDesigner:\n \"\"\"\n Designs a field for BattleShips, accepts field height and width\n \"\"\"\n def __init__(\n self,\n ):\n self.field = []\n\n def design_field(\n self,\n height,\n width,\n ):\n\n self.field = [[\n '~' for __\n in range(height)]\n for __ in range(width)\n ]\n\n return self.field\n\n def __str__(\n self,\n ):\n return '\\n'.join(map(str, self.field))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# coding: utf-8
etc_dictionary = {
'2 30대': '이삼십대',
'20~30대': '이삼십대',
'20, 30대': '이십대 삼십대',
'1+1': '원플러스원',
'3에서 6개월인': '3개월에서 육개월인',
}
english_dictionary = {
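    # NOTE: this literal contains many duplicate keys ('B', 'PBS', 'JTBC',
    # 'IS', 'K', 'Y', ...); Python keeps only the last occurrence, so e.g.
    # 'B' ends up mapping to '씨' (from a later entry) rather than '비'.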
'Devsisters': '데브시스터즈',
'track': '트랙',
# krbook
'LA': '엘에이',
'LG': '엘지',
'KOREA': '코리아',
'JSA': '제이에스에이',
'PGA': '피지에이',
'GA': '지에이',
'idol': '아이돌',
'KTX': '케이티엑스',
'AC': '에이씨',
'DVD': '디비디',
'US': '유에스',
'CNN': '씨엔엔',
'LPGA': '엘피지에이',
'P': '피',
'L': '엘',
'T': '티',
'B': '비',
'C': '씨',
'BIFF': '비아이에프에프',
'GV': '지비',
# JTBC
'IT': '아이티',
'IQ': '아이큐',
'JTBC': '제이티비씨',
'trickle down effect': '트리클 다운 이펙트',
'trickle up effect': '트리클 업 이펙트',
'down': '다운',
'up': '업',
'FCK': '에프씨케이',
'AP': '에이피',
'WHERETHEWILDTHINGSARE': '',
'Rashomon Effect': '',
'O': '오',
'OO': '오오',
'B': '비',
'GDP': '지디피',
'CIPA': '씨아이피에이',
'YS': '와이에스',
'Y': '와이',
'S': '에스',
'JTBC': '제이티비씨',
'PC': '피씨',
'bill': '빌',
'Halmuny': '하모니', #####
'X': '엑스',
'SNS': '에스엔에스',
'ability': '어빌리티',
'shy': '',
'CCTV': '씨씨티비',
'IT': '아이티',
'the tenth man': '더 텐쓰 맨', ####
'L': '엘',
'PC': '피씨',
'YSDJJPMB': '', ########
'Content Attitude Timing': '컨텐트 애티튜드 타이밍',
'CAT': '캣',
'IS': '아이에스',
'SNS': '에스엔에스',
'K': '케이',
'Y': '와이',
'KDI': '케이디아이',
'DOC': '디오씨',
'CIA': '씨아이에이',
'PBS': '피비에스',
'D': '디',
    # The original source had a stray string literal ('PPropertyPositionPowerPrisonP')
    # here that silently concatenated with the next key, yielding the bogus
    # entry 'PPropertyPositionPowerPrisonPS'; 'S' is already defined above.
    'S': '에스',
'francisco': '프란시스코',
'I': '아이',
'III': '아이아이', ######
'No joke': '노 조크',
'BBK': '비비케이',
'LA': '엘에이',
'Don': '',
't worry be happy': ' 워리 비 해피',
'NO': '엔오', #####
'it was our sky': '잇 워즈 아워 스카이',
'it is our sky': '잇 이즈 아워 스카이', ####
'NEIS': '엔이아이에스', #####
'IMF': '아이엠에프',
'apology': '어폴로지',
'humble': '험블',
'M': '엠',
'Nowhere Man': '노웨어 맨',
'The Tenth Man': '더 텐쓰 맨',
'PBS': '피비에스',
'BBC': '비비씨',
'MRJ': '엠알제이',
'CCTV': '씨씨티비',
'Pick me up': '픽 미 업',
'DNA': '디엔에이',
'UN': '유엔',
'STOP': '스탑', #####
'PRESS': '프레스', #####
'not to be': '낫 투비',
'Denial': '디나이얼',
'G': '지',
'IMF': '아이엠에프',
'GDP': '지디피',
'JTBC': '제이티비씨',
'Time flies like an arrow': '타임 플라이즈 라이크 언 애로우',
'DDT': '디디티',
'AI': '에이아이',
'Z': '제트',
'OECD': '오이씨디',
'N': '앤',
'A': '에이',
'MB': '엠비',
'EH': '이에이치',
'IS': '아이에스',
'TV': '티비',
'MIT': '엠아이티',
'KBO': '케이비오',
'I love America': '아이 러브 아메리카',
'SF': '에스에프',
'Q': '큐',
'KFX': '케이에프엑스',
'PM': '피엠',
'Prime Minister': '프라임 미니스터',
'Swordline': '스워드라인',
'TBS': '티비에스',
'DDT': '디디티',
'CS': '씨에스',
'Reflecting Absence': '리플렉팅 앱센스',
'PBS': '피비에스',
'Drum being beaten by everyone': '드럼 빙 비튼 바이 에브리원',
'negative pressure': '네거티브 프레셔',
'F': '에프',
'KIA': '기아',
'FTA': '에프티에이',
'Que sais-je': '',
'UFC': '유에프씨',
'P': '피',
'DJ': '디제이',
'Chaebol': '채벌',
'BBC': '비비씨',
'OECD': '오이씨디',
'BC': '삐씨',
'C': '씨',
'B': '씨',
'KY': '케이와이',
'K': '케이',
'CEO': '씨이오',
'YH': '와이에치',
'IS': '아이에스',
'who are you': '후 얼 유',
'Y': '와이',
'The Devils Advocate': '더 데빌즈 어드보카트',
'YS': '와이에스',
'so sorry': '쏘 쏘리',
'Santa': '산타',
'Big Endian': '빅 엔디안',
'Small Endian': '스몰 엔디안',
'Oh Captain My Captain': '오 캡틴 마이 캡틴',
'AIB': '에이아이비',
'K': '케이',
'PBS': '피비에스',
}
|
normal
|
{
"blob_id": "ccd1e57518065963158984dda52297db45ce204e",
"index": 2471,
"step-1": "<mask token>\n",
"step-2": "etc_dictionary = {'2 30대': '이삼십대', '20~30대': '이삼십대', '20, 30대': '이십대 삼십대',\n '1+1': '원플러스원', '3에서 6개월인': '3개월에서 육개월인'}\nenglish_dictionary = {'Devsisters': '데브시스터즈', 'track': '트랙', 'LA': '엘에이',\n 'LG': '엘지', 'KOREA': '코리아', 'JSA': '제이에스에이', 'PGA': '피지에이', 'GA': '지에이',\n 'idol': '아이돌', 'KTX': '케이티엑스', 'AC': '에이씨', 'DVD': '디비디', 'US': '유에스',\n 'CNN': '씨엔엔', 'LPGA': '엘피지에이', 'P': '피', 'L': '엘', 'T': '티', 'B': '비',\n 'C': '씨', 'BIFF': '비아이에프에프', 'GV': '지비', 'IT': '아이티', 'IQ': '아이큐',\n 'JTBC': '제이티비씨', 'trickle down effect': '트리클 다운 이펙트',\n 'trickle up effect': '트리클 업 이펙트', 'down': '다운', 'up': '업', 'FCK':\n '에프씨케이', 'AP': '에이피', 'WHERETHEWILDTHINGSARE': '', 'Rashomon Effect':\n '', 'O': '오', 'OO': '오오', 'B': '비', 'GDP': '지디피', 'CIPA': '씨아이피에이',\n 'YS': '와이에스', 'Y': '와이', 'S': '에스', 'JTBC': '제이티비씨', 'PC': '피씨', 'bill':\n '빌', 'Halmuny': '하모니', 'X': '엑스', 'SNS': '에스엔에스', 'ability': '어빌리티',\n 'shy': '', 'CCTV': '씨씨티비', 'IT': '아이티', 'the tenth man': '더 텐쓰 맨', 'L':\n '엘', 'PC': '피씨', 'YSDJJPMB': '', 'Content Attitude Timing':\n '컨텐트 애티튜드 타이밍', 'CAT': '캣', 'IS': '아이에스', 'SNS': '에스엔에스', 'K': '케이',\n 'Y': '와이', 'KDI': '케이디아이', 'DOC': '디오씨', 'CIA': '씨아이에이', 'PBS': '피비에스',\n 'D': '디', 'PPropertyPositionPowerPrisonPS': '에스', 'francisco': '프란시스코',\n 'I': '아이', 'III': '아이아이', 'No joke': '노 조크', 'BBK': '비비케이', 'LA': '엘에이',\n 'Don': '', 't worry be happy': ' 워리 비 해피', 'NO': '엔오', 'it was our sky':\n '잇 워즈 아워 스카이', 'it is our sky': '잇 이즈 아워 스카이', 'NEIS': '엔이아이에스', 'IMF':\n '아이엠에프', 'apology': '어폴로지', 'humble': '험블', 'M': '엠', 'Nowhere Man':\n '노웨어 맨', 'The Tenth Man': '더 텐쓰 맨', 'PBS': '피비에스', 'BBC': '비비씨', 'MRJ':\n '엠알제이', 'CCTV': '씨씨티비', 'Pick me up': '픽 미 업', 'DNA': '디엔에이', 'UN':\n '유엔', 'STOP': '스탑', 'PRESS': '프레스', 'not to be': '낫 투비', 'Denial':\n '디나이얼', 'G': '지', 'IMF': '아이엠에프', 'GDP': '지디피', 'JTBC': '제이티비씨',\n 'Time flies like an arrow': '타임 플라이즈 라이크 언 애로우', 'DDT': '디디티', 'AI':\n '에이아이', 'Z': '제트', 'OECD': '오이씨디', 'N': '앤', 'A': '에이', 'MB': '엠비',\n 'EH': '이에이치', 'IS': '아이에스', 'TV': '티비', 'MIT': '엠아이티', 'KBO': '케이비오',\n 'I love America': '아이 러브 아메리카', 'SF': '에스에프', 'Q': '큐', 'KFX': '케이에프엑스',\n 'PM': '피엠', 'Prime Minister': '프라임 미니스터', 'Swordline': '스워드라인', 'TBS':\n '티비에스', 'DDT': '디디티', 'CS': '씨에스', 'Reflecting Absence': '리플렉팅 앱센스',\n 'PBS': '피비에스', 'Drum being beaten by everyone': '드럼 빙 비튼 바이 에브리원',\n 'negative pressure': '네거티브 프레셔', 'F': '에프', 'KIA': '기아', 'FTA': '에프티에이',\n 'Que sais-je': '', 'UFC': '유에프씨', 'P': '피', 'DJ': '디제이', 'Chaebol':\n '채벌', 'BBC': '비비씨', 'OECD': '오이씨디', 'BC': '삐씨', 'C': '씨', 'B': '씨',\n 'KY': '케이와이', 'K': '케이', 'CEO': '씨이오', 'YH': '와이에치', 'IS': '아이에스',\n 'who are you': '후 얼 유', 'Y': '와이', 'The Devils Advocate': '더 데빌즈 어드보카트',\n 'YS': '와이에스', 'so sorry': '쏘 쏘리', 'Santa': '산타', 'Big Endian': '빅 엔디안',\n 'Small Endian': '스몰 엔디안', 'Oh Captain My Captain': '오 캡틴 마이 캡틴', 'AIB':\n '에이아이비', 'K': '케이', 'PBS': '피비에스'}\n",
"step-3": "# coding: utf-8\r\n\r\netc_dictionary = {\r\n '2 30대': '이삼십대',\r\n '20~30대': '이삼십대',\r\n '20, 30대': '이십대 삼십대',\r\n '1+1': '원플러스원',\r\n '3에서 6개월인': '3개월에서 육개월인',\r\n}\r\n\r\nenglish_dictionary = {\r\n 'Devsisters': '데브시스터즈',\r\n 'track': '트랙',\r\n\r\n # krbook\r\n 'LA': '엘에이',\r\n 'LG': '엘지',\r\n 'KOREA': '코리아',\r\n 'JSA': '제이에스에이',\r\n 'PGA': '피지에이',\r\n 'GA': '지에이',\r\n 'idol': '아이돌',\r\n 'KTX': '케이티엑스',\r\n 'AC': '에이씨',\r\n 'DVD': '디비디',\r\n 'US': '유에스',\r\n 'CNN': '씨엔엔',\r\n 'LPGA': '엘피지에이',\r\n 'P': '피',\r\n 'L': '엘',\r\n 'T': '티',\r\n 'B': '비',\r\n 'C': '씨',\r\n 'BIFF': '비아이에프에프',\r\n 'GV': '지비',\r\n\r\n # JTBC\r\n 'IT': '아이티',\r\n 'IQ': '아이큐',\r\n 'JTBC': '제이티비씨',\r\n 'trickle down effect': '트리클 다운 이펙트',\r\n 'trickle up effect': '트리클 업 이펙트',\r\n 'down': '다운',\r\n 'up': '업',\r\n 'FCK': '에프씨케이',\r\n 'AP': '에이피',\r\n 'WHERETHEWILDTHINGSARE': '',\r\n 'Rashomon Effect': '',\r\n 'O': '오',\r\n 'OO': '오오',\r\n 'B': '비',\r\n 'GDP': '지디피',\r\n 'CIPA': '씨아이피에이',\r\n 'YS': '와이에스',\r\n 'Y': '와이',\r\n 'S': '에스',\r\n 'JTBC': '제이티비씨',\r\n 'PC': '피씨',\r\n 'bill': '빌',\r\n 'Halmuny': '하모니', #####\r\n 'X': '엑스',\r\n 'SNS': '에스엔에스',\r\n 'ability': '어빌리티',\r\n 'shy': '',\r\n 'CCTV': '씨씨티비',\r\n 'IT': '아이티',\r\n 'the tenth man': '더 텐쓰 맨', ####\r\n 'L': '엘',\r\n 'PC': '피씨',\r\n 'YSDJJPMB': '', ########\r\n 'Content Attitude Timing': '컨텐트 애티튜드 타이밍',\r\n 'CAT': '캣',\r\n 'IS': '아이에스',\r\n 'SNS': '에스엔에스',\r\n 'K': '케이',\r\n 'Y': '와이',\r\n 'KDI': '케이디아이',\r\n 'DOC': '디오씨',\r\n 'CIA': '씨아이에이',\r\n 'PBS': '피비에스',\r\n 'D': '디',\r\n 'PPropertyPositionPowerPrisonP'\r\n 'S': '에스',\r\n 'francisco': '프란시스코',\r\n 'I': '아이',\r\n 'III': '아이아이', ######\r\n 'No joke': '노 조크',\r\n 'BBK': '비비케이',\r\n 'LA': '엘에이',\r\n 'Don': '',\r\n 't worry be happy': ' 워리 비 해피',\r\n 'NO': '엔오', #####\r\n 'it was our sky': '잇 워즈 아워 스카이',\r\n 'it is our sky': '잇 이즈 아워 스카이', ####\r\n 'NEIS': '엔이아이에스', #####\r\n 'IMF': '아이엠에프',\r\n 'apology': '어폴로지',\r\n 'humble': '험블',\r\n 'M': '엠',\r\n 'Nowhere Man': '노웨어 맨',\r\n 'The Tenth Man': '더 텐쓰 맨',\r\n 'PBS': '피비에스',\r\n 'BBC': '비비씨',\r\n 'MRJ': '엠알제이',\r\n 'CCTV': '씨씨티비',\r\n 'Pick me up': '픽 미 업',\r\n 'DNA': '디엔에이',\r\n 'UN': '유엔',\r\n 'STOP': '스탑', #####\r\n 'PRESS': '프레스', #####\r\n 'not to be': '낫 투비',\r\n 'Denial': '디나이얼',\r\n 'G': '지',\r\n 'IMF': '아이엠에프',\r\n 'GDP': '지디피',\r\n 'JTBC': '제이티비씨',\r\n 'Time flies like an arrow': '타임 플라이즈 라이크 언 애로우',\r\n 'DDT': '디디티',\r\n 'AI': '에이아이',\r\n 'Z': '제트',\r\n 'OECD': '오이씨디',\r\n 'N': '앤',\r\n 'A': '에이',\r\n 'MB': '엠비',\r\n 'EH': '이에이치',\r\n 'IS': '아이에스',\r\n 'TV': '티비',\r\n 'MIT': '엠아이티',\r\n 'KBO': '케이비오',\r\n 'I love America': '아이 러브 아메리카',\r\n 'SF': '에스에프',\r\n 'Q': '큐',\r\n 'KFX': '케이에프엑스',\r\n 'PM': '피엠',\r\n 'Prime Minister': '프라임 미니스터',\r\n 'Swordline': '스워드라인',\r\n 'TBS': '티비에스',\r\n 'DDT': '디디티',\r\n 'CS': '씨에스',\r\n 'Reflecting Absence': '리플렉팅 앱센스',\r\n 'PBS': '피비에스',\r\n 'Drum being beaten by everyone': '드럼 빙 비튼 바이 에브리원',\r\n 'negative pressure': '네거티브 프레셔',\r\n 'F': '에프',\r\n 'KIA': '기아',\r\n 'FTA': '에프티에이',\r\n 'Que sais-je': '',\r\n 'UFC': '유에프씨',\r\n 'P': '피',\r\n 'DJ': '디제이',\r\n 'Chaebol': '채벌',\r\n 'BBC': '비비씨',\r\n 'OECD': '오이씨디',\r\n 'BC': '삐씨',\r\n 'C': '씨',\r\n 'B': '씨',\r\n 'KY': '케이와이',\r\n 'K': '케이',\r\n 'CEO': '씨이오',\r\n 'YH': '와이에치',\r\n 'IS': '아이에스',\r\n 'who are you': '후 얼 유',\r\n 'Y': '와이',\r\n 'The Devils Advocate': '더 데빌즈 어드보카트',\r\n 'YS': '와이에스',\r\n 'so sorry': '쏘 쏘리',\r\n 'Santa': '산타',\r\n 'Big Endian': '빅 엔디안',\r\n 'Small Endian': '스몰 엔디안',\r\n 'Oh Captain My Captain': '오 캡틴 마이 캡틴',\r\n 'AIB': '에이아이비',\r\n 
'K': '케이',\r\n 'PBS': '피비에스',\r\n}\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
import webapp2
import requests
import requests_toolbelt.adapters.appengine
from . import mongodb
import datetime
from bson.json_util import dumps
class RestHandler(webapp2.RequestHandler):
def dispatch(self):
# time.sleep(1)
super(RestHandler, self).dispatch()
def send_json(self, content):
self.response.headers['content-type'] = 'application/json'
self.response.write(content)
def update_trello_card(self, id, params):
card_url = 'https://api.trello.com/1/cards/' + id
return requests.put(card_url, params=params)
def create_trello_card(self, params):
card_url = 'https://api.trello.com/1/cards'
return requests.post(card_url, params=params)
def delete_trello_card(self, id, params):
card_url = 'https://api.trello.com/1/cards/' + id
return requests.delete(card_url, params=params)
class TasksHandler(RestHandler):
def get(self):
board_id = self.request.get('board_id')
data = mongodb.list_where_value_matches('tasks', 'boards', board_id)
self.send_json(dumps(data))
class CreateTaskHandler(RestHandler):
def post(self):
key = '61cf04749fda864dd404009216cbe106'
token = '2caecaa0245326fcc4b949a4780ad7fdcb8cd8d77b4394ad8590d244dbfa542f'
payload = json.loads(self.request.body)
phase_id = payload['phase']['projectManager']['listId']
team_phase_id = payload['phase']['team']['listId']
params = { 'idList': phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }
team_params = { 'idList': team_phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }
requests_toolbelt.adapters.appengine.monkeypatch()
response = self.create_trello_card(params)
payload['projectManagementTrelloId'] = json.loads(response.text)['id']
if team_phase_id is not None:
response = self.create_trello_card(team_params)
print response
payload['teamTrelloId'] = json.loads(response.text)['id']
payload['dateLastActivity'] = datetime.datetime.utcnow().isoformat()
mongodb.create(payload, 'tasks')
self.send_json(dumps(response.text))
class UpdateTaskHandler(RestHandler):
def post(self):
key = '61cf04749fda864dd404009216cbe106'
token = '2caecaa0245326fcc4b949a4780ad7fdcb8cd8d77b4394ad8590d244dbfa542f'
payload = json.loads(self.request.body)
phase_id = payload['phase']['projectManager']['listId']
team_phase_id = payload['phase']['team']['listId']
params = { 'idList': phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }
team_params = { 'idList': team_phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }
requests_toolbelt.adapters.appengine.monkeypatch()
response = self.update_trello_card(payload['projectManagementTrelloId'], params)
if team_phase_id is not None:
if payload.get('teamTrelloId') is None:
response = self.create_trello_card(team_params)
payload['teamTrelloId'] = json.loads(response.text)['id']
else:
response = self.update_trello_card(payload['teamTrelloId'], team_params)
else:
if payload['phase']['team']['phase'] is None:
params = { 'key': key, 'token': token }
response = self.delete_trello_card(payload['teamTrelloId'], params)
payload['dateLastActivity'] = datetime.datetime.utcnow().isoformat()
mongodb.update(payload, payload['_id'], 'tasks')
self.send_json(dumps(response.text))
class DeleteTaskHandler(RestHandler):
def post(self):
key = '61cf04749fda864dd404009216cbe106'
token = '2caecaa0245326fcc4b949a4780ad7fdcb8cd8d77b4394ad8590d244dbfa542f'
payload = json.loads(self.request.body)
params = { 'key': key, 'token': token }
requests_toolbelt.adapters.appengine.monkeypatch()
response = self.delete_trello_card(payload['projectManagementTrelloId'], params)
if payload.get('teamTrelloId') is not None:
response = self.delete_trello_card(payload['teamTrelloId'], params)
mongodb.delete(payload['_id']['$oid'], 'tasks')
self.send_json(dumps(response.text))
APP = webapp2.WSGIApplication([
('/rest/tasks', TasksHandler),
('/rest/task/create', CreateTaskHandler),
('/rest/task/update', UpdateTaskHandler),
('/rest/task/delete', DeleteTaskHandler)
], debug=True)
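
# A hypothetical local smoke test (webtest is an assumption here -- it is not
# used by this module, but is the usual way to drive a webapp2 WSGI app;
# '/rest/tasks?board_id=abc123' is a made-up example query):
#
#   import webtest
#   test_app = webtest.TestApp(APP)
#   print test_app.get('/rest/tasks?board_id=abc123').status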
|
normal
|
{
"blob_id": "8af3bb1b33a01353cd7f26c9496485e36d954edb",
"index": 5362,
"step-1": "import json\n\nimport webapp2\n\nimport requests\n\nimport requests_toolbelt.adapters.appengine\n\nfrom . import mongodb\n\nimport datetime\n\nfrom bson.json_util import dumps\n\nclass RestHandler(webapp2.RequestHandler):\n\n def dispatch(self):\n # time.sleep(1)\n super(RestHandler, self).dispatch()\n\n def send_json(self, content):\n self.response.headers['content-type'] = 'application/json'\n self.response.write(content)\n\n def update_trello_card(self, id, params):\n card_url = 'https://api.trello.com/1/cards/' + id\n return requests.put(card_url, params=params)\n\n def create_trello_card(self, params):\n card_url = 'https://api.trello.com/1/cards'\n return requests.post(card_url, params=params)\n\n def delete_trello_card(self, id, params):\n card_url = 'https://api.trello.com/1/cards/' + id\n return requests.delete(card_url, params=params)\n\nclass TasksHandler(RestHandler):\n\n def get(self):\n board_id = self.request.get('board_id')\n data = mongodb.list_where_value_matches('tasks', 'boards', board_id)\n self.send_json(dumps(data))\n\nclass CreateTaskHandler(RestHandler):\n def post(self):\n key = '61cf04749fda864dd404009216cbe106'\n token = '2caecaa0245326fcc4b949a4780ad7fdcb8cd8d77b4394ad8590d244dbfa542f'\n payload = json.loads(self.request.body)\n\n phase_id = payload['phase']['projectManager']['listId']\n team_phase_id = payload['phase']['team']['listId']\n params = { 'idList': phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }\n team_params = { 'idList': team_phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }\n\n requests_toolbelt.adapters.appengine.monkeypatch()\n response = self.create_trello_card(params)\n payload['projectManagementTrelloId'] = json.loads(response.text)['id']\n if team_phase_id is not None:\n response = self.create_trello_card(team_params)\n print response\n payload['teamTrelloId'] = json.loads(response.text)['id']\n\n payload['dateLastActivity'] = datetime.datetime.utcnow().isoformat()\n mongodb.create(payload, 'tasks')\n\n self.send_json(dumps(response.text))\n\nclass UpdateTaskHandler(RestHandler):\n def post(self):\n key = '61cf04749fda864dd404009216cbe106'\n token = '2caecaa0245326fcc4b949a4780ad7fdcb8cd8d77b4394ad8590d244dbfa542f'\n payload = json.loads(self.request.body)\n\n phase_id = payload['phase']['projectManager']['listId']\n team_phase_id = payload['phase']['team']['listId']\n params = { 'idList': phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }\n team_params = { 'idList': team_phase_id, 'name': payload['name'], 'desc': payload['desc'], 'key': key, 'token': token }\n\n requests_toolbelt.adapters.appengine.monkeypatch()\n response = self.update_trello_card(payload['projectManagementTrelloId'], params)\n if team_phase_id is not None:\n if payload.get('teamTrelloId') is None:\n response = self.create_trello_card(team_params)\n payload['teamTrelloId'] = json.loads(response.text)['id']\n else:\n response = self.update_trello_card(payload['teamTrelloId'], team_params)\n else:\n if payload['phase']['team']['phase'] is None:\n params = { 'key': key, 'token': token }\n response = self.delete_trello_card(payload['teamTrelloId'], params)\n\n payload['dateLastActivity'] = datetime.datetime.utcnow().isoformat()\n mongodb.update(payload, payload['_id'], 'tasks')\n\n self.send_json(dumps(response.text))\n\nclass DeleteTaskHandler(RestHandler):\n def post(self):\n key = '61cf04749fda864dd404009216cbe106'\n token = 
'2caecaa0245326fcc4b949a4780ad7fdcb8cd8d77b4394ad8590d244dbfa542f'\n payload = json.loads(self.request.body)\n\n params = { 'key': key, 'token': token }\n\n requests_toolbelt.adapters.appengine.monkeypatch()\n response = self.delete_trello_card(payload['projectManagementTrelloId'], params)\n if payload.get('teamTrelloId') is not None:\n response = self.delete_trello_card(payload['teamTrelloId'], params)\n\n mongodb.delete(payload['_id']['$oid'], 'tasks')\n\n self.send_json(dumps(response.text))\n\n\nAPP = webapp2.WSGIApplication([\n ('/rest/tasks', TasksHandler),\n ('/rest/task/create', CreateTaskHandler),\n ('/rest/task/update', UpdateTaskHandler),\n ('/rest/task/delete', DeleteTaskHandler)\n], debug=True)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
__author__ = "Prikly Grayp"
__license__ = "MIT"
__version__ = "1.0.0"
__email__ = "[email protected]"
__status__ = "Development"
from contextlib import closing
class RefrigeratorRaider:
'''Raid a refrigerator'''
def open(self):
print('Open fridge door.')
def take(self, food):
print('Finding {}...'.format(food))
if food == 'deep fried pizza':
raise RuntimeError('Health warning!')
print('Taking {}'.format(food))
def close(self):
print('Close fridge door.')
def raid(food):
with closing(RefrigeratorRaider()) as r:
r.open()
r.take(food)
raid('bacon')
raid('deep fried pizza')
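
# For reference, contextlib.closing(thing) is documented as equivalent to:
#
#   from contextlib import contextmanager
#
#   @contextmanager
#   def closing(thing):
#       try:
#           yield thing
#       finally:
#           thing.close()
#
# so close() is still called even though take('deep fried pizza') raises
# RuntimeError before the with-block finishes.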
|
normal
|
{
"blob_id": "7455eb670c2c019b8d066fcc6f2878a2136b7fd0",
"index": 5051,
"step-1": "<mask token>\n\n\nclass RefrigeratorRaider:\n \"\"\"Raid a refrigerator\"\"\"\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RefrigeratorRaider:\n \"\"\"Raid a refrigerator\"\"\"\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\n\nraid('bacon')\nraid('deep fried pizza')\n",
"step-3": "__author__ = 'Prikly Grayp'\n__license__ = 'MIT'\n__version__ = '1.0.0'\n__email__ = '[email protected]'\n__status__ = 'Development'\n<mask token>\n\n\nclass RefrigeratorRaider:\n \"\"\"Raid a refrigerator\"\"\"\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\n\nraid('bacon')\nraid('deep fried pizza')\n",
"step-4": "__author__ = 'Prikly Grayp'\n__license__ = 'MIT'\n__version__ = '1.0.0'\n__email__ = '[email protected]'\n__status__ = 'Development'\nfrom contextlib import closing\n\n\nclass RefrigeratorRaider:\n \"\"\"Raid a refrigerator\"\"\"\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\n\nraid('bacon')\nraid('deep fried pizza')\n",
"step-5": "__author__ = \"Prikly Grayp\"\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n\nfrom contextlib import closing\n\nclass RefrigeratorRaider:\n '''Raid a refrigerator'''\n\n def open(self):\n print('Open fridge door.')\n\n def take(self, food):\n print('Finding {}...'.format(food))\n if food == 'deep fried pizza':\n raise RuntimeError('Health warning!')\n print('Taking {}'.format(food))\n\n def close(self):\n print('Close fridg door.')\n\ndef raid(food):\n with closing(RefrigeratorRaider()) as r:\n r.open()\n r.take(food)\n\nraid('bacon')\nraid('deep fried pizza')",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
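The record demonstrates contextlib.closing, which calls close() on exit even when the body raises. A short sketch of the equivalent try/finally, assuming the RefrigeratorRaider class above is in scope:

# Equivalent of `with closing(RefrigeratorRaider()) as r:` spelled out:
# close() runs even when take() raises, which is why the failing
# raid('deep fried pizza') above still closes the door before the error.
def raid_explicit(food):
    r = RefrigeratorRaider()  # assumes the class above is in scope
    try:
        r.open()
        r.take(food)
    finally:
        r.close()  # mirrors what closing()'s __exit__ does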
from typing import List
"""
1. Generate an array containing the products of all elements to the left of current element
2. Similarly, start from the last element and generate an array containing the products to the right of each element
3. Multiply both arrays element-wise
"""
class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
output = []
prod = 1
# First generate the products to the left of the current element
for num in nums:
output.append(prod)
prod *= num
prod = 1
# Now, generate and multiply the product to the right of current element
for k in range(len(nums) - 1, -1, -1):
output[k] = output[k] * prod
prod *= nums[k]
return output
|
normal
|
{
"blob_id": "26ae44b5be1d78ed3fe9c858413ae47e163c5460",
"index": 1282,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def productExceptSelf(self, nums: List[int]) ->List[int]:\n output = []\n prod = 1\n for num in nums:\n output.append(prod)\n prod *= num\n prod = 1\n for k in range(len(nums) - 1, -1, -1):\n output[k] = output[k] * prod\n prod *= nums[k]\n return output\n",
"step-4": "from typing import List\n<mask token>\n\n\nclass Solution:\n\n def productExceptSelf(self, nums: List[int]) ->List[int]:\n output = []\n prod = 1\n for num in nums:\n output.append(prod)\n prod *= num\n prod = 1\n for k in range(len(nums) - 1, -1, -1):\n output[k] = output[k] * prod\n prod *= nums[k]\n return output\n",
"step-5": "from typing import List\n\n\"\"\"\n1. Generate an array containing the products of all elements to the left of current element\n2. Similarly, start from the last element and generate an array containing the products to the right of each element\n3. Multiply both arrays element-wise\n\n\"\"\"\n\n\nclass Solution:\n def productExceptSelf(self, nums: List[int]) -> List[int]:\n output = []\n prod = 1\n # First generate the products to the left of the current element\n for num in nums:\n output.append(prod)\n prod *= num\n\n prod = 1\n # Now, generate and multiply the product to the right of current element\n for k in range(len(nums) - 1, -1, -1):\n output[k] = output[k] * prod\n prod *= nums[k]\n\n return output\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
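The docstring's three steps are easiest to verify on a concrete input; a quick check, assuming the Solution class above is in scope:

# Worked example for nums = [1, 2, 3, 4]:
#   after the left pass:  output = [1, 1, 2, 6]
#   after the right pass: output = [24, 12, 8, 6]
assert Solution().productExceptSelf([1, 2, 3, 4]) == [24, 12, 8, 6]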
import json
import random
import uuid
from collections import OrderedDict
import docker
from .db_utils import DBUtils
from .models import DynamicDockerChallenge
class DockerUtils:
@staticmethod
def add_new_docker_container(user_id, challenge_id, flag, port):
configs = DBUtils.get_all_configs()
dynamic_docker_challenge = DynamicDockerChallenge.query \
.filter(DynamicDockerChallenge.id == challenge_id) \
.first_or_404()
client = docker.DockerClient(base_url=configs.get("docker_api_url"))
in_port = dynamic_docker_challenge.redirect_port
ports = {str(in_port):str(port)}
uuid_code = str(uuid.uuid4())
try:
client.containers.run(image=dynamic_docker_challenge.docker_image, name=str(user_id) + '-' + uuid_code,
environment={'FLAG': flag}, detach=True,
mem_limit=dynamic_docker_challenge.memory_limit,
nano_cpus=int(dynamic_docker_challenge.cpu_limit * 1e9), auto_remove=True, ports=ports)
DBUtils.create_new_container(user_id, challenge_id, flag, uuid_code, port)
return True
except:
DBUtils.remove_current_container(user_id)
DockerUtils.remove_current_docker_container(user_id)
return False
@staticmethod
def remove_current_docker_container(user_id, is_retry=False):
configs = DBUtils.get_all_configs()
container = DBUtils.get_current_containers(user_id=user_id)
auto_containers = configs.get("docker_auto_connect_containers", "").split(",")
if container is None:
return
try:
client = docker.DockerClient(base_url=configs.get("docker_api_url"))
networks = client.networks.list(names=[str(user_id) + '-' + container.uuid])
if len(networks) == 0:
containers = client.containers.list(filters={'name': str(user_id) + '-' + container.uuid})
for c in containers:
c.remove(force=True)
else:
containers = client.containers.list(filters={'label': str(user_id) + '-' + container.uuid})
for c in containers:
c.remove(force=True)
for n in networks:
for ac in auto_containers:
n.disconnect(ac)
n.remove()
except:
if not is_retry:
DockerUtils.remove_current_docker_container(user_id, True)
|
normal
|
{
"blob_id": "e2e2e746d0a8f6b01e6f54e930c7def2d48c2d62",
"index": 4653,
"step-1": "<mask token>\n\n\nclass DockerUtils:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DockerUtils:\n <mask token>\n\n @staticmethod\n def remove_current_docker_container(user_id, is_retry=False):\n configs = DBUtils.get_all_configs()\n container = DBUtils.get_current_containers(user_id=user_id)\n auto_containers = configs.get('docker_auto_connect_containers', ''\n ).split(',')\n if container is None:\n return\n try:\n client = docker.DockerClient(base_url=configs.get('docker_api_url')\n )\n networks = client.networks.list(names=[str(user_id) + '-' +\n container.uuid])\n if len(networks) == 0:\n containers = client.containers.list(filters={'name': str(\n user_id) + '-' + container.uuid})\n for c in containers:\n c.remove(force=True)\n else:\n containers = client.containers.list(filters={'label': str(\n user_id) + '-' + container.uuid})\n for c in containers:\n c.remove(force=True)\n for n in networks:\n for ac in auto_containers:\n n.disconnect(ac)\n n.remove()\n except:\n if not is_retry:\n DockerUtils.remove_current_docker_container(user_id, True)\n",
"step-3": "<mask token>\n\n\nclass DockerUtils:\n\n @staticmethod\n def add_new_docker_container(user_id, challenge_id, flag, port):\n configs = DBUtils.get_all_configs()\n dynamic_docker_challenge = DynamicDockerChallenge.query.filter(\n DynamicDockerChallenge.id == challenge_id).first_or_404()\n client = docker.DockerClient(base_url=configs.get('docker_api_url'))\n in_port = dynamic_docker_challenge.redirect_port\n ports = {str(in_port): str(port)}\n uuid_code = str(uuid.uuid4())\n try:\n client.containers.run(image=dynamic_docker_challenge.\n docker_image, name=str(user_id) + '-' + uuid_code,\n environment={'FLAG': flag}, detach=True, mem_limit=\n dynamic_docker_challenge.memory_limit, nano_cpus=int(\n dynamic_docker_challenge.cpu_limit * 1000000000.0),\n auto_remove=True, ports=ports)\n DBUtils.create_new_container(user_id, challenge_id, flag,\n uuid_code, port)\n return True\n except:\n DBUtils.remove_current_container(user_id)\n DockerUtils.remove_current_docker_container(user_id)\n return False\n\n @staticmethod\n def remove_current_docker_container(user_id, is_retry=False):\n configs = DBUtils.get_all_configs()\n container = DBUtils.get_current_containers(user_id=user_id)\n auto_containers = configs.get('docker_auto_connect_containers', ''\n ).split(',')\n if container is None:\n return\n try:\n client = docker.DockerClient(base_url=configs.get('docker_api_url')\n )\n networks = client.networks.list(names=[str(user_id) + '-' +\n container.uuid])\n if len(networks) == 0:\n containers = client.containers.list(filters={'name': str(\n user_id) + '-' + container.uuid})\n for c in containers:\n c.remove(force=True)\n else:\n containers = client.containers.list(filters={'label': str(\n user_id) + '-' + container.uuid})\n for c in containers:\n c.remove(force=True)\n for n in networks:\n for ac in auto_containers:\n n.disconnect(ac)\n n.remove()\n except:\n if not is_retry:\n DockerUtils.remove_current_docker_container(user_id, True)\n",
"step-4": "import json\nimport random\nimport uuid\nfrom collections import OrderedDict\nimport docker\nfrom .db_utils import DBUtils\nfrom .models import DynamicDockerChallenge\n\n\nclass DockerUtils:\n\n @staticmethod\n def add_new_docker_container(user_id, challenge_id, flag, port):\n configs = DBUtils.get_all_configs()\n dynamic_docker_challenge = DynamicDockerChallenge.query.filter(\n DynamicDockerChallenge.id == challenge_id).first_or_404()\n client = docker.DockerClient(base_url=configs.get('docker_api_url'))\n in_port = dynamic_docker_challenge.redirect_port\n ports = {str(in_port): str(port)}\n uuid_code = str(uuid.uuid4())\n try:\n client.containers.run(image=dynamic_docker_challenge.\n docker_image, name=str(user_id) + '-' + uuid_code,\n environment={'FLAG': flag}, detach=True, mem_limit=\n dynamic_docker_challenge.memory_limit, nano_cpus=int(\n dynamic_docker_challenge.cpu_limit * 1000000000.0),\n auto_remove=True, ports=ports)\n DBUtils.create_new_container(user_id, challenge_id, flag,\n uuid_code, port)\n return True\n except:\n DBUtils.remove_current_container(user_id)\n DockerUtils.remove_current_docker_container(user_id)\n return False\n\n @staticmethod\n def remove_current_docker_container(user_id, is_retry=False):\n configs = DBUtils.get_all_configs()\n container = DBUtils.get_current_containers(user_id=user_id)\n auto_containers = configs.get('docker_auto_connect_containers', ''\n ).split(',')\n if container is None:\n return\n try:\n client = docker.DockerClient(base_url=configs.get('docker_api_url')\n )\n networks = client.networks.list(names=[str(user_id) + '-' +\n container.uuid])\n if len(networks) == 0:\n containers = client.containers.list(filters={'name': str(\n user_id) + '-' + container.uuid})\n for c in containers:\n c.remove(force=True)\n else:\n containers = client.containers.list(filters={'label': str(\n user_id) + '-' + container.uuid})\n for c in containers:\n c.remove(force=True)\n for n in networks:\n for ac in auto_containers:\n n.disconnect(ac)\n n.remove()\n except:\n if not is_retry:\n DockerUtils.remove_current_docker_container(user_id, True)\n",
"step-5": "import json\nimport random\nimport uuid\nfrom collections import OrderedDict\n\nimport docker\nfrom .db_utils import DBUtils\nfrom .models import DynamicDockerChallenge\n\n\nclass DockerUtils:\n\n @staticmethod\n def add_new_docker_container(user_id, challenge_id, flag, port):\n configs = DBUtils.get_all_configs()\n\n dynamic_docker_challenge = DynamicDockerChallenge.query \\\n .filter(DynamicDockerChallenge.id == challenge_id) \\\n .first_or_404()\n\n client = docker.DockerClient(base_url=configs.get(\"docker_api_url\"))\n in_port = dynamic_docker_challenge.redirect_port\n ports = {str(in_port):str(port)}\n uuid_code = str(uuid.uuid4())\n\n try:\n client.containers.run(image=dynamic_docker_challenge.docker_image, name=str(user_id) + '-' + uuid_code,\n environment={'FLAG': flag}, detach=True,\n mem_limit=dynamic_docker_challenge.memory_limit,\n nano_cpus=int(dynamic_docker_challenge.cpu_limit * 1e9), auto_remove=True, ports=ports)\n DBUtils.create_new_container(user_id, challenge_id, flag, uuid_code, port)\n return True\n except:\n DBUtils.remove_current_container(user_id)\n DockerUtils.remove_current_docker_container(user_id)\n return False\n \n \n @staticmethod\n def remove_current_docker_container(user_id, is_retry=False):\n configs = DBUtils.get_all_configs()\n container = DBUtils.get_current_containers(user_id=user_id)\n\n auto_containers = configs.get(\"docker_auto_connect_containers\", \"\").split(\",\")\n\n if container is None:\n return\n\n try:\n client = docker.DockerClient(base_url=configs.get(\"docker_api_url\"))\n networks = client.networks.list(names=[str(user_id) + '-' + container.uuid])\n\n if len(networks) == 0:\n containers = client.containers.list(filters={'name': str(user_id) + '-' + container.uuid})\n for c in containers:\n c.remove(force=True)\n else:\n containers = client.containers.list(filters={'label': str(user_id) + '-' + container.uuid})\n for c in containers:\n c.remove(force=True)\n\n for n in networks:\n for ac in auto_containers:\n n.disconnect(ac)\n n.remove()\n\n\n except:\n if not is_retry:\n DockerUtils.remove_current_docker_container(user_id, True)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
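The record drives the docker SDK (docker-py). A minimal standalone sketch of the same client calls; the base_url and image are placeholders, not values from the record:

# Standalone sketch of the docker-py calls DockerUtils builds on.
import docker

client = docker.DockerClient(base_url='unix://var/run/docker.sock')
container = client.containers.run(
    image='alpine:latest',        # placeholder image
    command='sleep 30',
    name='demo-container',
    mem_limit='64m',              # same style of limit as the record
    nano_cpus=int(0.5 * 1e9),     # nano_cpus takes CPUs * 1e9
    detach=True,
    auto_remove=True,             # container removes itself on exit
)
print(container.id)
container.remove(force=True)      # force-remove, as the record's cleanup path does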
import contextlib
import dask
import dask.array as da
import packaging.version
import pandas
import six
import sklearn
SK_VERSION = packaging.version.parse(sklearn.__version__)
DASK_VERSION = packaging.version.parse(dask.__version__)
PANDAS_VERSION = packaging.version.parse(pandas.__version__)
@contextlib.contextmanager
def dummy_context(*args, **kwargs):
yield
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping # noqa
if DASK_VERSION < packaging.version.parse("1.1.0"):
blockwise = da.atop
else:
blockwise = da.blockwise
|
normal
|
{
"blob_id": "1bdb19373960e4f63d80d6ab73ec3c0939e40b7f",
"index": 364,
"step-1": "<mask token>\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\nif six.PY2:\n from collections import Mapping\nelse:\n from collections.abc import Mapping\nif DASK_VERSION < packaging.version.parse('1.1.0'):\n blockwise = da.atop\nelse:\n blockwise = da.blockwise\n",
"step-3": "<mask token>\nSK_VERSION = packaging.version.parse(sklearn.__version__)\nDASK_VERSION = packaging.version.parse(dask.__version__)\nPANDAS_VERSION = packaging.version.parse(pandas.__version__)\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\nif six.PY2:\n from collections import Mapping\nelse:\n from collections.abc import Mapping\nif DASK_VERSION < packaging.version.parse('1.1.0'):\n blockwise = da.atop\nelse:\n blockwise = da.blockwise\n",
"step-4": "import contextlib\nimport dask\nimport dask.array as da\nimport packaging.version\nimport pandas\nimport six\nimport sklearn\nSK_VERSION = packaging.version.parse(sklearn.__version__)\nDASK_VERSION = packaging.version.parse(dask.__version__)\nPANDAS_VERSION = packaging.version.parse(pandas.__version__)\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\nif six.PY2:\n from collections import Mapping\nelse:\n from collections.abc import Mapping\nif DASK_VERSION < packaging.version.parse('1.1.0'):\n blockwise = da.atop\nelse:\n blockwise = da.blockwise\n",
"step-5": "import contextlib\n\nimport dask\nimport dask.array as da\nimport packaging.version\nimport pandas\nimport six\nimport sklearn\n\nSK_VERSION = packaging.version.parse(sklearn.__version__)\nDASK_VERSION = packaging.version.parse(dask.__version__)\nPANDAS_VERSION = packaging.version.parse(pandas.__version__)\n\n\[email protected]\ndef dummy_context(*args, **kwargs):\n yield\n\n\nif six.PY2:\n from collections import Mapping\nelse:\n from collections.abc import Mapping # noqa\n\nif DASK_VERSION < packaging.version.parse(\"1.1.0\"):\n blockwise = da.atop\nelse:\n blockwise = da.blockwise\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
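The record binds names at import time based on parsed versions. Parsing matters because plain string comparison misorders versions; a two-line check:

# Why the record parses versions instead of comparing strings:
from packaging.version import parse

assert parse('1.10.0') > parse('1.9.0')   # correct version ordering
assert not ('1.10.0' > '1.9.0')           # plain string comparison gets it wrong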
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RDt(RPackage):
"""A Wrapper of the JavaScript Library 'DataTables'.
Data objects in R can be rendered as HTML tables using the JavaScript
library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'
library has been included in this R package. The package name 'DT' is an
abbreviation of 'DataTables'."""
cran = "DT"
version("0.23", sha256="360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70")
version("0.20", sha256="c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f")
version("0.17", sha256="e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56")
version("0.13", sha256="79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5")
version("0.8", sha256="90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21")
version("0.7", sha256="1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c")
version("0.6", sha256="2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916")
version("0.4", sha256="3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19")
version("0.3", sha256="ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb")
version("0.2", sha256="a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd")
version("0.1", sha256="129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756")
depends_on("[email protected]:", type=("build", "run"))
depends_on("[email protected]:", type=("build", "run"))
depends_on("[email protected]:", type=("build", "run"), when="@0.8:")
depends_on("r-magrittr", type=("build", "run"))
depends_on("r-crosstalk", type=("build", "run"))
depends_on("r-jquerylib", type=("build", "run"), when="@0.19:")
depends_on("r-promises", type=("build", "run"), when="@0.5:")
|
normal
|
{
"blob_id": "c88e2336432f93d95b4e2285aa532b673a4a410b",
"index": 1095,
"step-1": "<mask token>\n\n\nclass RDt(RPackage):\n <mask token>\n <mask token>\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n",
"step-2": "<mask token>\n\n\nclass RDt(RPackage):\n <mask token>\n cran = 'DT'\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n",
"step-3": "<mask token>\n\n\nclass RDt(RPackage):\n \"\"\"A Wrapper of the JavaScript Library 'DataTables'.\n\n Data objects in R can be rendered as HTML tables using the JavaScript\n library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'\n library has been included in this R package. The package name 'DT' is an\n abbreviation of 'DataTables'.\"\"\"\n cran = 'DT'\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n",
"step-4": "from spack.package import *\n\n\nclass RDt(RPackage):\n \"\"\"A Wrapper of the JavaScript Library 'DataTables'.\n\n Data objects in R can be rendered as HTML tables using the JavaScript\n library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'\n library has been included in this R package. The package name 'DT' is an\n abbreviation of 'DataTables'.\"\"\"\n cran = 'DT'\n version('0.23', sha256=\n '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70')\n version('0.20', sha256=\n 'c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f')\n version('0.17', sha256=\n 'e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56')\n version('0.13', sha256=\n '79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5')\n version('0.8', sha256=\n '90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21')\n version('0.7', sha256=\n '1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c')\n version('0.6', sha256=\n '2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916')\n version('0.4', sha256=\n '3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19')\n version('0.3', sha256=\n 'ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb')\n version('0.2', sha256=\n 'a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd')\n version('0.1', sha256=\n '129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756')\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'))\n depends_on('[email protected]:', type=('build', 'run'), when='@0.8:')\n depends_on('r-magrittr', type=('build', 'run'))\n depends_on('r-crosstalk', type=('build', 'run'))\n depends_on('r-jquerylib', type=('build', 'run'), when='@0.19:')\n depends_on('r-promises', type=('build', 'run'), when='@0.5:')\n",
"step-5": "# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass RDt(RPackage):\n \"\"\"A Wrapper of the JavaScript Library 'DataTables'.\n\n Data objects in R can be rendered as HTML tables using the JavaScript\n library 'DataTables' (typically via R Markdown or Shiny). The 'DataTables'\n library has been included in this R package. The package name 'DT' is an\n abbreviation of 'DataTables'.\"\"\"\n\n cran = \"DT\"\n\n version(\"0.23\", sha256=\"360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70\")\n version(\"0.20\", sha256=\"c66d7f49ec101fdbb91c6d26c06fb1373f9ebdefe29fe99f2ae1a641220aba9f\")\n version(\"0.17\", sha256=\"e3430292421dcc2b6ad5f2deda729f0603da4eb31f86d071833e6e11abf3fb56\")\n version(\"0.13\", sha256=\"79a073fe96980ce150d790ab76133c9e80bd463270c34d149c03934a622d63b5\")\n version(\"0.8\", sha256=\"90195054148806cf31c7db5c41f72d5389c75adc0b1183606a9babd2c6ae8e21\")\n version(\"0.7\", sha256=\"1de3f170deccd9e3aaefc057dd87c498e3b3f7f88eff645cf165ac34ffe3de2c\")\n version(\"0.6\", sha256=\"2ed68e9d161559171fa74b6105eee87b98acf755eae072b38ada60a83d427916\")\n version(\"0.4\", sha256=\"3daa96b819ca54e5fbc2c7d78cb3637982a2d44be58cea0683663b71cfc7fa19\")\n version(\"0.3\", sha256=\"ef42b24c9ea6cfa1ce089687bf858d773ac495dc80756d4475234e979bd437eb\")\n version(\"0.2\", sha256=\"a1b7f9e5c31a241fdf78ac582499f346e915ff948554980bbc2262c924b806bd\")\n version(\"0.1\", sha256=\"129bdafededbdcc3279d63b16f00c885b215f23cab2edfe33c9cbe177c8c4756\")\n\n depends_on(\"[email protected]:\", type=(\"build\", \"run\"))\n depends_on(\"[email protected]:\", type=(\"build\", \"run\"))\n depends_on(\"[email protected]:\", type=(\"build\", \"run\"), when=\"@0.8:\")\n depends_on(\"r-magrittr\", type=(\"build\", \"run\"))\n depends_on(\"r-crosstalk\", type=(\"build\", \"run\"))\n depends_on(\"r-jquerylib\", type=(\"build\", \"run\"), when=\"@0.19:\")\n depends_on(\"r-promises\", type=(\"build\", \"run\"), when=\"@0.5:\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
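Each version() directive pairs a release with the sha256 digest Spack verifies after downloading the tarball. A rough illustration of that check — this is not Spack's own code, and the tarball path is a placeholder:

# Rough illustration of the checksum verification a version() directive implies.
import hashlib

def sha256_of(path):
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            h.update(chunk)
    return h.hexdigest()

# digest recorded above for DT 0.23
expected = '360ae2fcb1141125a1b16448570fc37d14c4dd3f78a872c26df4fda1787cdc70'
# assert sha256_of('DT_0.23.tar.gz') == expected   # path is a placeholder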
#!/usr/bin/python
# -*-coding:utf-8 -*-
import smtplib
import MySQLdb
import datetime
import types
from email.mime.text import MIMEText
from email.header import Header
def sendEmail(sender,passwd,host,port,receivers,date,mail) :
message = MIMEText(mail, 'html', 'utf-8')
message['From'] = Header("告警发送者<"+sender+">", 'utf-8')
subject = str(date) + '服务器告警通知'
message['Subject'] = Header(subject, 'utf-8')
try:
smtpObj = smtplib.SMTP_SSL(host,port)
smtpObj.ehlo()
smtpObj.login(sender,passwd)
smtpObj.sendmail(sender, receivers, message.as_string())
smtpObj.quit()
print "邮件发送成功"
except smtplib.SMTPException:
print "Error: 无法发送邮件"
if __name__ == '__main__' :
sender = '[email protected]'
passwd = '@Chuck20110923'
host = 'smtp.exmail.qq.com'
port = 465
receivers = ['[email protected]','[email protected]']
    daytime = (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y%m%d')
mail = '服务器问题警报!!!'
sendEmail(sender,passwd,host,port,receivers,daytime,mail)
|
normal
|
{
"blob_id": "221a75d37fbb49e8508fc786ee8e6e90b19e12c0",
"index": 4683,
"step-1": "#!/usr/bin/python\n# -*-coding:utf-8 -*-\nimport smtplib\nimport MySQLdb\nimport datetime\nimport types\ndef sendEmail(sender,passwd,host,port,receivers,date,mail) :\n message = MIMEText(mail, 'html', 'utf-8')\n message['From'] = Header(\"告警发送者<\"+sender+\">\", 'utf-8')\n subject = str(date) + '服务器告警通知'\n message['Subject'] = Header(subject, 'utf-8')\n try:\n smtpObj = smtplib.SMTP_SSL(host,port)\n smtpObj.ehlo()\n smtpObj.login(sender,passwd)\n smtpObj.sendmail(sender, receivers, message.as_string())\n smtpObj.quit()\n print \"邮件发送成功\"\n except smtplib.SMTPException:\n print \"Error: 无法发送邮件\"\nif __name__ == '__main__' :\n sender = '[email protected]'\n passwd = '@Chuck20110923'\n host = 'smtp.exmail.qq.com'\n port = 465\n receivers = ['[email protected]','[email protected]']\n daytime = (datetime.date.today() - datetime.timedelta(days=1) ). strftime('%Y%m%d')\n mail = '服务器问题警报!!!'\n sendEmail(sender,passwd,host,port,receivers,daytime,mail)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
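The record targets Python 2 (print statements). A Python 3 sketch of the same SSL send path; host, port, and the credentials are placeholders, not the values from the record:

# Python 3 sketch of sending an HTML alert mail over SMTP_SSL.
import smtplib
from email.mime.text import MIMEText
from email.header import Header

def send_alert(sender, password, host, port, receivers, subject, body):
    msg = MIMEText(body, 'html', 'utf-8')
    msg['From'] = Header(sender, 'utf-8')
    msg['Subject'] = Header(subject, 'utf-8')
    try:
        with smtplib.SMTP_SSL(host, port) as smtp:  # SSL from the start, e.g. port 465
            smtp.login(sender, password)
            smtp.sendmail(sender, receivers, msg.as_string())
        print('mail sent')
    except smtplib.SMTPException as exc:
        print('Error: could not send mail:', exc)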
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField,SubmitField, PasswordField, RadioField, MultipleFileField, SubmitField, TextAreaField
from wtforms.fields.html5 import EmailField, TelField, DateField
from wtforms.validators import DataRequired, Email, Length, InputRequired
class SignUpForm(FlaskForm):
id = StringField('ID*', validators=[DataRequired()])
fname = StringField('Full Name*', validators=[DataRequired()])
email = EmailField('Email Id*',validators=[DataRequired(), Email()])
password = PasswordField('Password*', validators=[DataRequired()])
contactno = TelField('Mobile No*.', validators=[DataRequired(), Length(min=10, max=10)])
design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])
submit = SubmitField('Sign Up >>')
class LoginForm(FlaskForm):
email = EmailField('Email Id*',validators=[DataRequired(), Email()])
password = PasswordField('Password*', validators=[DataRequired()])
design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])
submit = SubmitField('Login >>')
class ForgotForm(FlaskForm):
email = EmailField('Email Id*',validators=[DataRequired(), Email()])
design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])
submit = SubmitField('Change your Password')
class changepassword(FlaskForm):
password = PasswordField('Enter Password', validators=[DataRequired()])
submit = SubmitField('Change Password')
class ComplaintForm(FlaskForm):
fname = StringField('Full Name *', validators=[DataRequired()])
email = EmailField('Email Id*',validators=[DataRequired(), Email()])
date = DateField('Date', validators=[DataRequired()])
degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), ('masters', 'Masters')], validators=[DataRequired()])
semester = SelectField(u'Semester*', choices=[('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), ('fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), ('eighth', 'Eighth')], validators=[DataRequired()])
complaintcategory = SelectField(u'Complain Category*', choices=[('infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), ('academics', 'Academics'), ('management', 'Management'), ('faculty', 'Faculty'), ('library', 'Library')], validators=[DataRequired()])
message = TextAreaField('Enter Complain Details', validators=[DataRequired(), Length(max=100)])
#file = MultipleFileField(u'Upload File')
submit = SubmitField('Submit')
class complaint_status(FlaskForm):
status = SelectField(u'Complaint Status', choices=[('Pending', 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')])
submit = SubmitField('Update')
|
normal
|
{
"blob_id": "32ed07a89a6f929a6c4b78fd79e687b85e01015b",
"index": 535,
"step-1": "<mask token>\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"step-2": "<mask token>\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Login >>')\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"step-3": "<mask token>\n\n\nclass SignUpForm(FlaskForm):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Login >>')\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"step-4": "<mask token>\n\n\nclass SignUpForm(FlaskForm):\n id = StringField('ID*', validators=[DataRequired()])\n fname = StringField('Full Name*', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n contactno = TelField('Mobile No*.', validators=[DataRequired(), Length(\n min=10, max=10)])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Sign Up >>')\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Login >>')\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"step-5": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, SelectField,SubmitField, PasswordField, RadioField, MultipleFileField, SubmitField, TextAreaField\nfrom wtforms.fields.html5 import EmailField, TelField, DateField\nfrom wtforms.validators import DataRequired, Email, Length, InputRequired\n\nclass SignUpForm(FlaskForm):\n id = StringField('ID*', validators=[DataRequired()])\n fname = StringField('Full Name*', validators=[DataRequired()])\n email = EmailField('Email Id*',validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n contactno = TelField('Mobile No*.', validators=[DataRequired(), Length(min=10, max=10)])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Sign Up >>')\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Email Id*',validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Login >>')\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*',validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*',validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), ('masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), ('fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), ('eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[('infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), ('academics', 'Academics'), ('management', 'Management'), ('faculty', 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[DataRequired(), Length(max=100)])\n #file = MultipleFileField(u'Upload File')\n submit = SubmitField('Submit')\n\n\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending', 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')])\n submit = SubmitField('Update')\n",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
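A minimal sketch of wiring one of these forms into a Flask view; the app setup and template name are assumptions, not part of the record:

# Minimal sketch of using LoginForm in a Flask view.
from flask import Flask, redirect, render_template

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'  # Flask-WTF needs this for CSRF

@app.route('/login', methods=['GET', 'POST'])
def login():
    form = LoginForm()                  # the class defined above
    if form.validate_on_submit():       # POST and every validator passed
        # form.email.data / form.password.data hold the submitted values
        return redirect('/')
    return render_template('login.html', form=form)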
from django.contrib import admin
from .models import Hash
admin.site.register(Hash)
|
normal
|
{
"blob_id": "e2e4adaa8f7f62662e0c2915faff1bed72986351",
"index": 1084,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Hash)\n",
"step-3": "from django.contrib import admin\nfrom .models import Hash\nadmin.site.register(Hash)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
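An equivalent registration using Django's decorator form, which also leaves room for a customized admin:

from django.contrib import admin
from .models import Hash

@admin.register(Hash)
class HashAdmin(admin.ModelAdmin):
    pass  # list_display, search_fields, etc. would go here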
def multiply(num1, num2):
return num1 * num2
|
normal
|
{
"blob_id": "e835e75f444e97ca948ce27504cc9149ea0092f6",
"index": 1946,
"step-1": "<mask token>\n",
"step-2": "def multiply(num1, num2):\n return num1 * num2\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
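Quick sanity checks for the helper above:

assert multiply(3, 4) == 12
assert multiply(-2, 5) == -10
assert multiply('ab', 2) == 'abab'  # * also repeats sequences in Python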
import praw
import config
from imgurpython import ImgurClient
import datetime
from time import sleep
def respond_to_comment(comment, album_user, album_url, num_images, num_gifs):
body = "Here is an album of all unique image/gif posts made by " \
"[{user}]({album_url}). ({num_images} images" \
")".format(user=album_user.name, album_url=album_url, num_images=num_images, num_gifs=num_gifs)
comment.reply(body)
return
def create_album(user, imgur_client, reddit_client):
album = imgur_client.create_album({"title": user.name, "privacy": "hidden"})
urls = []
images = []
for submission in reddit_client.redditor(user.name).submissions.top("all"):
if not submission.is_self and submission.url not in urls:
urls.append(submission.url)
try:
image = imgur_client.upload_from_url(submission.url, config=None, anon=False)
images.append(image["id"])
# Sleep command to avoid exceeding rate limit
# 86400 seconds per day / 12500 requests per day = 1 request every 6.9 seconds
sleep(8)
except:
                pass  # swallow upload failures and continue with the next post
if len(images) > 0:
imgur_client.album_add_images(album["id"], images)
return album["id"]
def update_album(user, imgur_client, reddit_client):
return
def is_image(url):
return True
def is_gif(url):
return True
def run_bot():
reddit = praw.Reddit(
client_id=config.CLIENT_ID_REDDIT,
client_secret=config.SECRET_CODE_REDDIT,
user_agent=config.USER_AGENT_REDDIT,
username=config.USERNAME_REDDIT,
password=config.PASSWORD_REDDIT
)
client=ImgurClient(
client_id=config.CLIENT_ID_IMGUR,
client_secret=config.SECRET_CODE_IMGUR,
access_token=config.ACCESS_TOKEN_IMGUR,
refresh_token=config.REFRESH_TOKEN_IMGUR
)
login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()
print('Bot Initiation Successful')
print("Logged in at: {time}".format(time = login_time))
print("Logged into Reddit as: {user}".format(user=reddit.user.me().name))
print("Logged into Imgur as: {imgur_user}".format(imgur_user=""))
print("{api_calls} Imgur API calls remaining for the day.".format(api_calls=client.credits["ClientRemaining"]))
print("----------")
default_url = "https://imgur.com/"
command_call = '!compile-album'
subreddit = reddit.subreddit("all")
for comment in subreddit.stream.comments():
if command_call in comment.body and comment.created_utc > login_time:
parent_id = comment.parent_id
if parent_id[0:3] == "t1_":
parent_comment = reddit.comment(id=parent_id[3:])
album_id = create_album(parent_comment.author, client, reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_comment.author, album.link, album.images_count, 0)
elif parent_id[0:3] == "t3_":
parent_submission = reddit.submission(id=parent_id[3:])
album_id = create_album(parent_submission.author, client, reddit)
album = client.get_album(album_id)
respond_to_comment(comment, parent_submission.author, album.link, album.images_count, 0)
run_bot()
|
normal
|
{
"blob_id": "ca009022832963934230e356f9ea9eaedac7378b",
"index": 1745,
"step-1": "<mask token>\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\n<mask token>\n\n\ndef is_gif(url):\n return True\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = (\n 'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'\n .format(user=album_user.name, album_url=album_url, num_images=\n num_images, num_gifs=num_gifs))\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}\n )\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top('all'):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config\n =None, anon=False)\n images.append(image['id'])\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album['id'], images)\n return album['id']\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=\n config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)\n client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=\n config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR)\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print('Logged in at: {time}'.format(time=login_time))\n print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))\n print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))\n print('{api_calls} Imgur API calls remaining for the day.'.format(\n api_calls=client.credits['ClientRemaining']))\n print('----------')\n default_url = 'https://imgur.com/'\n command_call = '!compile-album'\n subreddit = reddit.subreddit('all')\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == 't1_':\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.\n link, album.images_count, 0)\n elif parent_id[0:3] == 't3_':\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client,\n reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album\n .link, album.images_count, 0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = (\n 'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'\n .format(user=album_user.name, album_url=album_url, num_images=\n num_images, num_gifs=num_gifs))\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}\n )\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top('all'):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config\n =None, anon=False)\n images.append(image['id'])\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album['id'], images)\n return album['id']\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=\n config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)\n client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=\n config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR)\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print('Logged in at: {time}'.format(time=login_time))\n print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))\n print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))\n print('{api_calls} Imgur API calls remaining for the day.'.format(\n api_calls=client.credits['ClientRemaining']))\n print('----------')\n default_url = 'https://imgur.com/'\n command_call = '!compile-album'\n subreddit = reddit.subreddit('all')\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == 't1_':\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.\n link, album.images_count, 0)\n elif parent_id[0:3] == 't3_':\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client,\n reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album\n .link, album.images_count, 0)\n\n\nrun_bot()\n",
"step-4": "import praw\nimport config\nfrom imgurpython import ImgurClient\nimport datetime\nfrom time import sleep\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = (\n 'Here is an album of all unique image/gif posts made by [{user}]({album_url}). ({num_images} images)'\n .format(user=album_user.name, album_url=album_url, num_images=\n num_images, num_gifs=num_gifs))\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({'title': user.name, 'privacy': 'hidden'}\n )\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top('all'):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config\n =None, anon=False)\n images.append(image['id'])\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album['id'], images)\n return album['id']\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(client_id=config.CLIENT_ID_REDDIT, client_secret=\n config.SECRET_CODE_REDDIT, user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT, password=config.PASSWORD_REDDIT)\n client = ImgurClient(client_id=config.CLIENT_ID_IMGUR, client_secret=\n config.SECRET_CODE_IMGUR, access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR)\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print('Logged in at: {time}'.format(time=login_time))\n print('Logged into Reddit as: {user}'.format(user=reddit.user.me().name))\n print('Logged into Imgur as: {imgur_user}'.format(imgur_user=''))\n print('{api_calls} Imgur API calls remaining for the day.'.format(\n api_calls=client.credits['ClientRemaining']))\n print('----------')\n default_url = 'https://imgur.com/'\n command_call = '!compile-album'\n subreddit = reddit.subreddit('all')\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == 't1_':\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.\n link, album.images_count, 0)\n elif parent_id[0:3] == 't3_':\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client,\n reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album\n .link, album.images_count, 0)\n\n\nrun_bot()\n",
"step-5": "import praw\nimport config\nfrom imgurpython import ImgurClient\nimport datetime\nfrom time import sleep\n\n\ndef respond_to_comment(comment, album_user, album_url, num_images, num_gifs):\n body = \"Here is an album of all unique image/gif posts made by \" \\\n \"[{user}]({album_url}). ({num_images} images\" \\\n \")\".format(user=album_user.name, album_url=album_url, num_images=num_images, num_gifs=num_gifs)\n comment.reply(body)\n return\n\n\ndef create_album(user, imgur_client, reddit_client):\n album = imgur_client.create_album({\"title\": user.name, \"privacy\": \"hidden\"})\n urls = []\n images = []\n for submission in reddit_client.redditor(user.name).submissions.top(\"all\"):\n if not submission.is_self and submission.url not in urls:\n urls.append(submission.url)\n try:\n image = imgur_client.upload_from_url(submission.url, config=None, anon=False)\n images.append(image[\"id\"])\n # Sleep command to avoid exceeding rate limit\n # 86400 seconds per day / 12500 requests per day = 1 request every 6.9 seconds\n sleep(8)\n except:\n None\n if len(images) > 0:\n imgur_client.album_add_images(album[\"id\"], images)\n return album[\"id\"]\n\n\ndef update_album(user, imgur_client, reddit_client):\n return\n\n\ndef is_image(url):\n return True\n\n\ndef is_gif(url):\n return True\n\n\ndef run_bot():\n reddit = praw.Reddit(\n client_id=config.CLIENT_ID_REDDIT,\n client_secret=config.SECRET_CODE_REDDIT,\n user_agent=config.USER_AGENT_REDDIT,\n username=config.USERNAME_REDDIT,\n password=config.PASSWORD_REDDIT\n )\n\n client=ImgurClient(\n client_id=config.CLIENT_ID_IMGUR,\n client_secret=config.SECRET_CODE_IMGUR,\n access_token=config.ACCESS_TOKEN_IMGUR,\n refresh_token=config.REFRESH_TOKEN_IMGUR\n )\n login_time = datetime.datetime.now(datetime.timezone.utc).timestamp()\n print('Bot Initiation Successful')\n print(\"Logged in at: {time}\".format(time = login_time))\n print(\"Logged into Reddit as: {user}\".format(user=reddit.user.me().name))\n print(\"Logged into Imgur as: {imgur_user}\".format(imgur_user=\"\"))\n print(\"{api_calls} Imgur API calls remaining for the day.\".format(api_calls=client.credits[\"ClientRemaining\"]))\n print(\"----------\")\n default_url = \"https://imgur.com/\"\n command_call = '!compile-album'\n subreddit = reddit.subreddit(\"all\")\n for comment in subreddit.stream.comments():\n if command_call in comment.body and comment.created_utc > login_time:\n parent_id = comment.parent_id\n if parent_id[0:3] == \"t1_\":\n parent_comment = reddit.comment(id=parent_id[3:])\n album_id = create_album(parent_comment.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_comment.author, album.link, album.images_count, 0)\n elif parent_id[0:3] == \"t3_\":\n parent_submission = reddit.submission(id=parent_id[3:])\n album_id = create_album(parent_submission.author, client, reddit)\n album = client.get_album(album_id)\n respond_to_comment(comment, parent_submission.author, album.link, album.images_count, 0)\n\n\nrun_bot()",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
# Read five integers from standard input and print their sum
sum_value = 0
for _ in range(5):
sum_value += int(input())
print(sum_value)
|
normal
|
{
"blob_id": "4add80894036e0395a6e6eb13e8a2db0d963de8c",
"index": 9654,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(5):\n sum_value += int(input())\nprint(sum_value)\n",
"step-3": "sum_value = 0\nfor _ in range(5):\n sum_value += int(input())\nprint(sum_value)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
a=raw_input("Enter the column\n")
b=raw_input("Enter the row\n")
i=0
k=0
m=0
c=""
d=""
while (m<int(b)):
while(i<int(a)):
c=c+" "
for j in xrange(1,4):
c=c+"-"
i=i+1
while(k<int(a)):
d=d+"|"
for l in xrange(1,4):
d=d+" "
k=k+1
m=m+1
print c
print d+"|"
print c
|
normal
|
{
"blob_id": "c28d7fc45be9a6efa7b7ef00520898c3d238ac63",
"index": 5518,
"step-1": "a=raw_input(\"Enter the column\\n\")\nb=raw_input(\"Enter the row\\n\")\ni=0\nk=0\nm=0\nc=\"\"\nd=\"\"\nwhile (m<int(b)):\n while(i<int(a)):\n c=c+\" \"\n for j in xrange(1,4):\n c=c+\"-\"\n i=i+1\n while(k<int(a)):\n d=d+\"|\"\n for l in xrange(1,4):\n d=d+\" \"\n k=k+1\n m=m+1\n print c\n print d+\"|\"\nprint c\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import csv
import os
from collections import namedtuple
from typing import List, Dict
from config import *
HEADER = ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue']
Assembly_Stats = namedtuple('Assembly_Stats', HEADER)
dir = '/home/andreas/GDrive/workspace/sparsedata/ref1shuffled_c5_l700/calign.assembly'
def read_assembly_file(file: str) -> List:
if not os.path.isfile(file):
return [-1, -1, -1, -1, -1, -1]
with open(file, 'r') as f:
file_content_string = f.read()
if 'LKH_Contigs:\nLKH_Objective' in file_content_string:
lkh_gaps = -1
else:
lkh_gaps = len(file_content_string.split('LKH_Contigs:\n')[1].split('\nLKH_Objective')[0].split('\n')) - 1
lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[1].split('\n')[0])
lkh_time = float(file_content_string.split('LKH_Time: ')[1].split('\n')[0])
if 'AP_Contigs:\nAP_Objective' in file_content_string:
ap_gaps = -1
else:
ap_gaps = len(file_content_string.split('AP_Contigs:\n')[1].split('\nAP_Objective')[0].split('\n')) - 1
ap_value = int(file_content_string.split('AP_Objective_Value: ')[1].split('\n')[0])
ap_time = float(file_content_string.split('AP_Time: ')[1].split('\n')[0])
return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]
def read_fasta_stats_file(file: str) -> List:
with open(file, 'r') as f:
file_content_string = f.read()
actual_objective_value = int(file_content_string.split('Objective function value: ')[1].split('\n')[0])
actual_gaps = int(file_content_string.split('Actual gaps: ')[1].split('\n')[0])
no_of_reads = int(file_content_string.split('Number of reads: ')[1].split('\n')[0])
return [no_of_reads, actual_objective_value, actual_gaps]
# def write_assembly_stats(assembly_stats_list: List[Assembly_Stats]) -> None:
# with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:
# f_csv = csv.writer(f, delimiter=',')
# f_csv.writerow(
# ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue'])
# for elem in assembly_stats_list:
# f_csv.writerow(elem)
def write_assembly_stats(stats_dict: Dict) -> None:
with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:
f_csv = csv.writer(f, delimiter=',')
f_csv.writerow(
['Genome', 'Coverage', 'AvgLength', 'Reads', 'ActualValue', 'ActualGaps',
'CalignLKHValue', 'CalignLKHGaps', 'CalignLKHTime',
'CalignAPValue', 'CalignAPGaps', 'CalignAPTime',
'CalignALKHValue', 'CalignALKHGaps', 'CalignALKHTime',
'CalignAAPValue', 'CalignAAPGaps', 'CalignAAPTime',
'CalignBLKHValue', 'CalignBLKHGaps', 'CalignBLKHTime',
'CalignBAPValue', 'CalignBAPGaps', 'CalignBAPTime',
])
for ref_name in [ref1_name, ref2_name, ref3_name]:
for c in coverages:
for length in average_length_list:
val = stats_dict[(ref_name, c, length)]
row = [ref_name, c, length]
row += val['Actual']
row += val['Calign']
row += val['Calign25']
row += val['Calign50']
f_csv.writerow(row)
def write_assembly_stats_tex(stats_dict: Dict) -> None:
with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex', 'w') as f:
for ref_name in [ref1_name, ref2_name, ref3_name]:
if ref1_name == ref_name:
dashline_active = ''
else:
dashline_active = '\\hdashline\n'
f.write('{}\\bfseries {}\\\\\n'.format(dashline_active, ref_name))
for c in coverages:
f.write('$c = {}$\\\\\n'.format(c))
for length in average_length_list:
val = stats_dict[(ref_name, c, length)]
row = [length]
row += [val['Actual'][0]]
row += ['']
row += val['Actual'][1:]
row += ['']
row += [*val['Calign'][0:2], '{0:.2f}'.format(val['Calign'][2]), *val['Calign'][3:5],
'{0:.2f}'.format(val['Calign'][5])]
row += ['']
row += [*val['Calign25'][0:2], '{0:.2f}'.format(val['Calign25'][2]), *val['Calign25'][3:5],
'{0:.2f}'.format(val['Calign25'][5])]
row += ['']
row += [*val['Calign50'][0:2], '{0:.2f}'.format(val['Calign50'][2]), *val['Calign50'][3:5],
'{0:.2f}'.format(val['Calign50'][5])]
f.write(' & '.join([str(x) for x in row]) + '\\\\\n')
def write_assembly_stats2(stats_dict: Dict) -> None:
with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv', 'w') as f:
f_csv = csv.writer(f, delimiter=',')
refs = [ref1_name, ref2_name]
f_csv.writerow(range(len(refs) * 9))
f_csv.writerow(
[stats_dict[(ref_name, c, l)]['Actual'][0] for ref_name in refs for c in
coverages for l in average_length_list])
f_csv.writerow(
[stats_dict[(ref_name, c, l)]['Actual'][1] for ref_name in refs for c in
coverages for l
in average_length_list])
f_csv.writerow(
[stats_dict[(ref_name, c, l)]['Actual'][2] for ref_name in refs for c in
coverages for l
in average_length_list])
for foo in ['Calign', 'Calign25', 'Calign50']:
for i in range(6):
if i in [2, 5]:
f_csv.writerow(
['{0:.2f}'.format(stats_dict[(ref_name, c, l)][foo][i]) for ref_name in refs for c in
coverages
for l in average_length_list])
else:
f_csv.writerow(
[stats_dict[(ref_name, c, l)][foo][i] for ref_name in refs for c in
coverages
for l in average_length_list])
assembly_stats_list = []
stats_dict = {}
# for dir in sorted(glob.glob('/home/andreas/GDrive/workspace/sparsedata/ref[1,2,3]_c[5,20,40]*/')):
for ref_number in [1, 2, 3]:
for coverage in coverages:
for length in average_length_list:
# file_sub_dir = dir.split('/')[-2] # example ref1_c5_l100
# ref_number = int(file_sub_dir.split('ref')[1].split('_')[0])
ref_name = references[ref_number - 1]
# coverage = int(file_sub_dir.split('_c')[1].split('_')[0])
# length = int(file_sub_dir.split('_l')[1])
dir = '/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'.format(ref_number, coverage, length)
stats_dict[(ref_name, coverage, length)] = {'Actual': read_fasta_stats_file(dir + 'fasta.stat'),
'Calign': read_assembly_file(dir + 'calign.assembly'),
'Calign25': read_assembly_file(
dir + 'calign_0_{}.assembly'.format(length // 4)),
'Calign50': read_assembly_file(
dir + 'calign_0_{}.assembly'.format(length // 2))}
# dir = '{}-{}-{}'.format(references[ref_number - 1], coverage, length)
# assembly_stats_list.append(
# Assembly_Stats(dir, len(lkh_contigs), lkh_value, lkh_time, len(ap_contigs), ap_value, ap_time,
# actual_Objective_value))
def write_whole_stats() -> None:
headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP', 'CalignBLKH',
'CalignBAP']
vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
'CalignBAP': 0}
gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
'CalignBAP': 0}
both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
'CalignBAP': 0}
atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
'CalignBAP': 0}
atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,
'CalignBAP': 0}
with open(DIR + 'assembly_stats.csv', 'r') as f:
f_csv = csv.DictReader(f, delimiter=',')
for row in f_csv:
for elem in headers:
if row['ActualValue'] == row[elem + 'Value']:
vals[elem] += 1
if row['ActualGaps'] == row[elem + 'Gaps']:
gaps[elem] += 1
if row['ActualValue'] == row[elem + 'Value'] and row['ActualGaps'] == row[elem + 'Gaps']:
both[elem] += 1
if row['CalignLKHValue'] == row['CalignAPValue']:
atspvsapval['CalignLKH'] += 1
atspvsapval['CalignAP'] += 1
if row['CalignALKHValue'] == row['CalignAAPValue']:
atspvsapval['CalignALKH'] += 1
atspvsapval['CalignAAP'] += 1
if row['CalignBLKHValue'] == row['CalignBAPValue']:
atspvsapval['CalignBLKH'] += 1
atspvsapval['CalignBAP'] += 1
if row['CalignLKHValue'] == row['CalignAPValue'] and row['CalignLKHGaps'] == row['CalignAPGaps']:
atspvsap['CalignLKH'] += 1
atspvsap['CalignAP'] += 1
if row['CalignALKHValue'] == row['CalignAAPValue'] and row['CalignALKHGaps'] == row['CalignAAPGaps']:
atspvsap['CalignALKH'] += 1
atspvsap['CalignAAP'] += 1
if row['CalignBLKHValue'] == row['CalignBAPValue'] and row['CalignBLKHGaps'] == row['CalignBAPGaps']:
atspvsap['CalignBLKH'] += 1
atspvsap['CalignBAP'] += 1
with open(DIR + 'complete_stats.csv', 'w') as g:
g_csv = csv.DictWriter(g, delimiter='&', fieldnames=headers)
g_csv.writeheader()
g_csv.writerow(vals)
g_csv.writerow(gaps)
g_csv.writerow(both)
g_csv.writerow(atspvsapval)
g_csv.writerow(atspvsap)
write_assembly_stats(stats_dict)
write_assembly_stats2(stats_dict)
write_assembly_stats_tex(stats_dict)
write_whole_stats()
|
normal
|
{
"blob_id": "edd98e3996b0fce46d33dd33340018ab5b029637",
"index": 2333,
"step-1": "<mask token>\n\n\ndef read_assembly_file(file: str) ->List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].\n split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[\n 1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split(\n '\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].\n split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1]\n .split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split(\n '\\n')[0])\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) ->Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split(\n 'Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].\n split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1]\n .split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\ndef write_assembly_stats(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(['Genome', 'Coverage', 'AvgLength', 'Reads',\n 'ActualValue', 'ActualGaps', 'CalignLKHValue', 'CalignLKHGaps',\n 'CalignLKHTime', 'CalignAPValue', 'CalignAPGaps',\n 'CalignAPTime', 'CalignALKHValue', 'CalignALKHGaps',\n 'CalignALKHTime', 'CalignAAPValue', 'CalignAAPGaps',\n 'CalignAAPTime', 'CalignBLKHValue', 'CalignBLKHGaps',\n 'CalignBLKHTime', 'CalignBAPValue', 'CalignBAPGaps',\n 'CalignBAPTime'])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex',\n 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val[\n 'Calign'][2]), *val['Calign'][3:5], '{0:.2f}'.\n format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val[\n 'Calign25'][2]), *val['Calign25'][3:5], '{0:.2f}'.\n format(val['Calign25'][5])]\n row += ['']\n row += [*val['Calign50'][0:2], '{0:.2f}'.format(val[\n 'Calign50'][2]), *val['Calign50'][3:5], '{0:.2f}'.\n format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) ->None:\n with 
open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][0] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][1] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][2] for\n ref_name in refs for c in coverages for l in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(['{0:.2f}'.format(stats_dict[ref_name, c,\n l][foo][i]) for ref_name in refs for c in coverages for\n l in average_length_list])\n else:\n f_csv.writerow([stats_dict[ref_name, c, l][foo][i] for\n ref_name in refs for c in coverages for l in\n average_length_list])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_assembly_file(file: str) ->List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].\n split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[\n 1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split(\n '\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].\n split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1]\n .split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split(\n '\\n')[0])\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) ->Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split(\n 'Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].\n split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1]\n .split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\ndef write_assembly_stats(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(['Genome', 'Coverage', 'AvgLength', 'Reads',\n 'ActualValue', 'ActualGaps', 'CalignLKHValue', 'CalignLKHGaps',\n 'CalignLKHTime', 'CalignAPValue', 'CalignAPGaps',\n 'CalignAPTime', 'CalignALKHValue', 'CalignALKHGaps',\n 'CalignALKHTime', 'CalignAAPValue', 'CalignAAPGaps',\n 'CalignAAPTime', 'CalignBLKHValue', 'CalignBLKHGaps',\n 'CalignBLKHTime', 'CalignBAPValue', 'CalignBAPGaps',\n 'CalignBAPTime'])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex',\n 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val[\n 'Calign'][2]), *val['Calign'][3:5], '{0:.2f}'.\n format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val[\n 'Calign25'][2]), *val['Calign25'][3:5], '{0:.2f}'.\n format(val['Calign25'][5])]\n row += ['']\n row += [*val['Calign50'][0:2], '{0:.2f}'.format(val[\n 'Calign50'][2]), *val['Calign50'][3:5], '{0:.2f}'.\n format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) ->None:\n with 
open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][0] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][1] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][2] for\n ref_name in refs for c in coverages for l in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(['{0:.2f}'.format(stats_dict[ref_name, c,\n l][foo][i]) for ref_name in refs for c in coverages for\n l in average_length_list])\n else:\n f_csv.writerow([stats_dict[ref_name, c, l][foo][i] for\n ref_name in refs for c in coverages for l in\n average_length_list])\n\n\n<mask token>\nfor ref_number in [1, 2, 3]:\n for coverage in coverages:\n for length in average_length_list:\n ref_name = references[ref_number - 1]\n dir = ('/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'\n .format(ref_number, coverage, length))\n stats_dict[ref_name, coverage, length] = {'Actual':\n read_fasta_stats_file(dir + 'fasta.stat'), 'Calign':\n read_assembly_file(dir + 'calign.assembly'), 'Calign25':\n read_assembly_file(dir + 'calign_0_{}.assembly'.format(\n length // 4)), 'Calign50': read_assembly_file(dir +\n 'calign_0_{}.assembly'.format(length // 2))}\n\n\ndef write_whole_stats() ->None:\n headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP',\n 'CalignBLKH', 'CalignBAP']\n vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0,\n 'CalignAAP': 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP':\n 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n with open(DIR + 'assembly_stats.csv', 'r') as f:\n f_csv = csv.DictReader(f, delimiter=',')\n for row in f_csv:\n for elem in headers:\n if row['ActualValue'] == row[elem + 'Value']:\n vals[elem] += 1\n if row['ActualGaps'] == row[elem + 'Gaps']:\n gaps[elem] += 1\n if row['ActualValue'] == row[elem + 'Value'] and row[\n 'ActualGaps'] == row[elem + 'Gaps']:\n both[elem] += 1\n if row['CalignLKHValue'] == row['CalignAPValue']:\n atspvsapval['CalignLKH'] += 1\n atspvsapval['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue']:\n atspvsapval['CalignALKH'] += 1\n atspvsapval['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue']:\n atspvsapval['CalignBLKH'] += 1\n atspvsapval['CalignBAP'] += 1\n if row['CalignLKHValue'] == row['CalignAPValue'] and row[\n 'CalignLKHGaps'] == row['CalignAPGaps']:\n atspvsap['CalignLKH'] += 1\n atspvsap['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue'] and row[\n 'CalignALKHGaps'] == row['CalignAAPGaps']:\n atspvsap['CalignALKH'] += 1\n atspvsap['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue'] and row[\n 'CalignBLKHGaps'] == row['CalignBAPGaps']:\n atspvsap['CalignBLKH'] += 1\n atspvsap['CalignBAP'] += 1\n with open(DIR + 'complete_stats.csv', 'w') as g:\n g_csv = csv.DictWriter(g, delimiter='&', 
fieldnames=headers)\n g_csv.writeheader()\n g_csv.writerow(vals)\n g_csv.writerow(gaps)\n g_csv.writerow(both)\n g_csv.writerow(atspvsapval)\n g_csv.writerow(atspvsap)\n\n\nwrite_assembly_stats(stats_dict)\nwrite_assembly_stats2(stats_dict)\nwrite_assembly_stats_tex(stats_dict)\nwrite_whole_stats()\n",
"step-3": "<mask token>\nHEADER = ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs',\n 'APValue', 'APTime', 'ActualObjectiveValue']\nAssembly_Stats = namedtuple('Assembly_Stats', HEADER)\ndir = (\n '/home/andreas/GDrive/workspace/sparsedata/ref1shuffled_c5_l700/calign.assembly'\n )\n\n\ndef read_assembly_file(file: str) ->List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].\n split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[\n 1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split(\n '\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].\n split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1]\n .split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split(\n '\\n')[0])\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) ->Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split(\n 'Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].\n split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1]\n .split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\ndef write_assembly_stats(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(['Genome', 'Coverage', 'AvgLength', 'Reads',\n 'ActualValue', 'ActualGaps', 'CalignLKHValue', 'CalignLKHGaps',\n 'CalignLKHTime', 'CalignAPValue', 'CalignAPGaps',\n 'CalignAPTime', 'CalignALKHValue', 'CalignALKHGaps',\n 'CalignALKHTime', 'CalignAAPValue', 'CalignAAPGaps',\n 'CalignAAPTime', 'CalignBLKHValue', 'CalignBLKHGaps',\n 'CalignBLKHTime', 'CalignBAPValue', 'CalignBAPGaps',\n 'CalignBAPTime'])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex',\n 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val[\n 'Calign'][2]), *val['Calign'][3:5], '{0:.2f}'.\n format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val[\n 'Calign25'][2]), *val['Calign25'][3:5], '{0:.2f}'.\n format(val['Calign25'][5])]\n row += ['']\n row += 
[*val['Calign50'][0:2], '{0:.2f}'.format(val[\n 'Calign50'][2]), *val['Calign50'][3:5], '{0:.2f}'.\n format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][0] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][1] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][2] for\n ref_name in refs for c in coverages for l in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(['{0:.2f}'.format(stats_dict[ref_name, c,\n l][foo][i]) for ref_name in refs for c in coverages for\n l in average_length_list])\n else:\n f_csv.writerow([stats_dict[ref_name, c, l][foo][i] for\n ref_name in refs for c in coverages for l in\n average_length_list])\n\n\nassembly_stats_list = []\nstats_dict = {}\nfor ref_number in [1, 2, 3]:\n for coverage in coverages:\n for length in average_length_list:\n ref_name = references[ref_number - 1]\n dir = ('/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'\n .format(ref_number, coverage, length))\n stats_dict[ref_name, coverage, length] = {'Actual':\n read_fasta_stats_file(dir + 'fasta.stat'), 'Calign':\n read_assembly_file(dir + 'calign.assembly'), 'Calign25':\n read_assembly_file(dir + 'calign_0_{}.assembly'.format(\n length // 4)), 'Calign50': read_assembly_file(dir +\n 'calign_0_{}.assembly'.format(length // 2))}\n\n\ndef write_whole_stats() ->None:\n headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP',\n 'CalignBLKH', 'CalignBAP']\n vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0,\n 'CalignAAP': 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP':\n 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n with open(DIR + 'assembly_stats.csv', 'r') as f:\n f_csv = csv.DictReader(f, delimiter=',')\n for row in f_csv:\n for elem in headers:\n if row['ActualValue'] == row[elem + 'Value']:\n vals[elem] += 1\n if row['ActualGaps'] == row[elem + 'Gaps']:\n gaps[elem] += 1\n if row['ActualValue'] == row[elem + 'Value'] and row[\n 'ActualGaps'] == row[elem + 'Gaps']:\n both[elem] += 1\n if row['CalignLKHValue'] == row['CalignAPValue']:\n atspvsapval['CalignLKH'] += 1\n atspvsapval['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue']:\n atspvsapval['CalignALKH'] += 1\n atspvsapval['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue']:\n atspvsapval['CalignBLKH'] += 1\n atspvsapval['CalignBAP'] += 1\n if row['CalignLKHValue'] == row['CalignAPValue'] and row[\n 'CalignLKHGaps'] == row['CalignAPGaps']:\n atspvsap['CalignLKH'] += 1\n atspvsap['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue'] and row[\n 'CalignALKHGaps'] == row['CalignAAPGaps']:\n atspvsap['CalignALKH'] += 1\n 
atspvsap['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue'] and row[\n 'CalignBLKHGaps'] == row['CalignBAPGaps']:\n atspvsap['CalignBLKH'] += 1\n atspvsap['CalignBAP'] += 1\n with open(DIR + 'complete_stats.csv', 'w') as g:\n g_csv = csv.DictWriter(g, delimiter='&', fieldnames=headers)\n g_csv.writeheader()\n g_csv.writerow(vals)\n g_csv.writerow(gaps)\n g_csv.writerow(both)\n g_csv.writerow(atspvsapval)\n g_csv.writerow(atspvsap)\n\n\nwrite_assembly_stats(stats_dict)\nwrite_assembly_stats2(stats_dict)\nwrite_assembly_stats_tex(stats_dict)\nwrite_whole_stats()\n",
"step-4": "import csv\nimport os\nfrom collections import namedtuple\nfrom typing import List, Dict\nfrom config import *\nHEADER = ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs',\n 'APValue', 'APTime', 'ActualObjectiveValue']\nAssembly_Stats = namedtuple('Assembly_Stats', HEADER)\ndir = (\n '/home/andreas/GDrive/workspace/sparsedata/ref1shuffled_c5_l700/calign.assembly'\n )\n\n\ndef read_assembly_file(file: str) ->List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].\n split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[\n 1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split(\n '\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].\n split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1]\n .split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split(\n '\\n')[0])\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) ->Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split(\n 'Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].\n split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1]\n .split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\ndef write_assembly_stats(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(['Genome', 'Coverage', 'AvgLength', 'Reads',\n 'ActualValue', 'ActualGaps', 'CalignLKHValue', 'CalignLKHGaps',\n 'CalignLKHTime', 'CalignAPValue', 'CalignAPGaps',\n 'CalignAPTime', 'CalignALKHValue', 'CalignALKHGaps',\n 'CalignALKHTime', 'CalignAAPValue', 'CalignAAPGaps',\n 'CalignAAPTime', 'CalignBLKHValue', 'CalignBLKHGaps',\n 'CalignBLKHTime', 'CalignBAPValue', 'CalignBAPGaps',\n 'CalignBAPTime'])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex',\n 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for length in average_length_list:\n val = stats_dict[ref_name, c, length]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val[\n 'Calign'][2]), *val['Calign'][3:5], '{0:.2f}'.\n format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val[\n 
'Calign25'][2]), *val['Calign25'][3:5], '{0:.2f}'.\n format(val['Calign25'][5])]\n row += ['']\n row += [*val['Calign50'][0:2], '{0:.2f}'.format(val[\n 'Calign50'][2]), *val['Calign50'][3:5], '{0:.2f}'.\n format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) ->None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv',\n 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][0] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][1] for\n ref_name in refs for c in coverages for l in average_length_list])\n f_csv.writerow([stats_dict[ref_name, c, l]['Actual'][2] for\n ref_name in refs for c in coverages for l in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(['{0:.2f}'.format(stats_dict[ref_name, c,\n l][foo][i]) for ref_name in refs for c in coverages for\n l in average_length_list])\n else:\n f_csv.writerow([stats_dict[ref_name, c, l][foo][i] for\n ref_name in refs for c in coverages for l in\n average_length_list])\n\n\nassembly_stats_list = []\nstats_dict = {}\nfor ref_number in [1, 2, 3]:\n for coverage in coverages:\n for length in average_length_list:\n ref_name = references[ref_number - 1]\n dir = ('/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'\n .format(ref_number, coverage, length))\n stats_dict[ref_name, coverage, length] = {'Actual':\n read_fasta_stats_file(dir + 'fasta.stat'), 'Calign':\n read_assembly_file(dir + 'calign.assembly'), 'Calign25':\n read_assembly_file(dir + 'calign_0_{}.assembly'.format(\n length // 4)), 'Calign50': read_assembly_file(dir +\n 'calign_0_{}.assembly'.format(length // 2))}\n\n\ndef write_whole_stats() ->None:\n headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP',\n 'CalignBLKH', 'CalignBAP']\n vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0,\n 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0,\n 'CalignAAP': 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP':\n 0, 'CalignBLKH': 0, 'CalignBAP': 0}\n with open(DIR + 'assembly_stats.csv', 'r') as f:\n f_csv = csv.DictReader(f, delimiter=',')\n for row in f_csv:\n for elem in headers:\n if row['ActualValue'] == row[elem + 'Value']:\n vals[elem] += 1\n if row['ActualGaps'] == row[elem + 'Gaps']:\n gaps[elem] += 1\n if row['ActualValue'] == row[elem + 'Value'] and row[\n 'ActualGaps'] == row[elem + 'Gaps']:\n both[elem] += 1\n if row['CalignLKHValue'] == row['CalignAPValue']:\n atspvsapval['CalignLKH'] += 1\n atspvsapval['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue']:\n atspvsapval['CalignALKH'] += 1\n atspvsapval['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue']:\n atspvsapval['CalignBLKH'] += 1\n atspvsapval['CalignBAP'] += 1\n if row['CalignLKHValue'] == row['CalignAPValue'] and row[\n 'CalignLKHGaps'] == row['CalignAPGaps']:\n atspvsap['CalignLKH'] += 1\n atspvsap['CalignAP'] += 1\n if row['CalignALKHValue'] == 
row['CalignAAPValue'] and row[\n 'CalignALKHGaps'] == row['CalignAAPGaps']:\n atspvsap['CalignALKH'] += 1\n atspvsap['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue'] and row[\n 'CalignBLKHGaps'] == row['CalignBAPGaps']:\n atspvsap['CalignBLKH'] += 1\n atspvsap['CalignBAP'] += 1\n with open(DIR + 'complete_stats.csv', 'w') as g:\n g_csv = csv.DictWriter(g, delimiter='&', fieldnames=headers)\n g_csv.writeheader()\n g_csv.writerow(vals)\n g_csv.writerow(gaps)\n g_csv.writerow(both)\n g_csv.writerow(atspvsapval)\n g_csv.writerow(atspvsap)\n\n\nwrite_assembly_stats(stats_dict)\nwrite_assembly_stats2(stats_dict)\nwrite_assembly_stats_tex(stats_dict)\nwrite_whole_stats()\n",
"step-5": "import csv\nimport os\nfrom collections import namedtuple\nfrom typing import List, Dict\n\nfrom config import *\n\nHEADER = ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue']\nAssembly_Stats = namedtuple('Assembly_Stats', HEADER)\n\ndir = '/home/andreas/GDrive/workspace/sparsedata/ref1shuffled_c5_l700/calign.assembly'\n\n\ndef read_assembly_file(file: str) -> List:\n if not os.path.isfile(file):\n return [-1, -1, -1, -1, -1, -1]\n with open(file, 'r') as f:\n file_content_string = f.read()\n if 'LKH_Contigs:\\nLKH_Objective' in file_content_string:\n lkh_gaps = -1\n else:\n lkh_gaps = len(file_content_string.split('LKH_Contigs:\\n')[1].split('\\nLKH_Objective')[0].split('\\n')) - 1\n lkh_value = int(file_content_string.split('LKH_Objective_Value: ')[1].split('\\n')[0])\n lkh_time = float(file_content_string.split('LKH_Time: ')[1].split('\\n')[0])\n if 'AP_Contigs:\\nAP_Objective' in file_content_string:\n ap_gaps = -1\n else:\n ap_gaps = len(file_content_string.split('AP_Contigs:\\n')[1].split('\\nAP_Objective')[0].split('\\n')) - 1\n ap_value = int(file_content_string.split('AP_Objective_Value: ')[1].split('\\n')[0])\n ap_time = float(file_content_string.split('AP_Time: ')[1].split('\\n')[0])\n\n return [lkh_value, lkh_gaps, lkh_time, ap_value, ap_gaps, ap_time]\n\n\ndef read_fasta_stats_file(file: str) -> Dict:\n with open(file, 'r') as f:\n file_content_string = f.read()\n actual_objective_value = int(file_content_string.split('Objective function value: ')[1].split('\\n')[0])\n actual_gaps = int(file_content_string.split('Actual gaps: ')[1].split('\\n')[0])\n no_of_reads = int(file_content_string.split('Number of reads: ')[1].split('\\n')[0])\n return [no_of_reads, actual_objective_value, actual_gaps]\n\n\n# def write_assembly_stats(assembly_stats_list: List[Assembly_Stats]) -> None:\n# with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:\n# f_csv = csv.writer(f, delimiter=',')\n# f_csv.writerow(\n# ['File', 'LKHContigs', 'LKHValue', 'LKHTime', 'APContigs', 'APValue', 'APTime', 'ActualObjectiveValue'])\n# for elem in assembly_stats_list:\n# f_csv.writerow(elem)\n\ndef write_assembly_stats(statsdict: Dict) -> None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.csv', 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n f_csv.writerow(\n ['Genome', 'Coverage', 'AvgLength', 'Reads', 'ActualValue', 'ActualGaps',\n 'CalignLKHValue', 'CalignLKHGaps', 'CalignLKHTime',\n 'CalignAPValue', 'CalignAPGaps', 'CalignAPTime',\n 'CalignALKHValue', 'CalignALKHGaps', 'CalignALKHTime',\n 'CalignAAPValue', 'CalignAAPGaps', 'CalignAAPTime',\n 'CalignBLKHValue', 'CalignBLKHGaps', 'CalignBLKHTime',\n 'CalignBAPValue', 'CalignBAPGaps', 'CalignBAPTime',\n ])\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n for c in coverages:\n for length in average_length_list:\n val = stats_dict[(ref_name, c, length)]\n row = [ref_name, c, length]\n row += val['Actual']\n row += val['Calign']\n row += val['Calign25']\n row += val['Calign50']\n\n f_csv.writerow(row)\n\n\ndef write_assembly_stats_tex(statsdict: Dict) -> None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats.tex', 'w') as f:\n for ref_name in [ref1_name, ref2_name, ref3_name]:\n if ref1_name == ref_name:\n dashline_active = ''\n else:\n dashline_active = '\\\\hdashline\\n'\n f.write('{}\\\\bfseries {}\\\\\\\\\\n'.format(dashline_active, ref_name))\n for c in coverages:\n f.write('$c = {}$\\\\\\\\\\n'.format(c))\n for 
length in average_length_list:\n val = stats_dict[(ref_name, c, length)]\n row = [length]\n row += [val['Actual'][0]]\n row += ['']\n row += val['Actual'][1:]\n row += ['']\n row += [*val['Calign'][0:2], '{0:.2f}'.format(val['Calign'][2]), *val['Calign'][3:5],\n '{0:.2f}'.format(val['Calign'][5])]\n row += ['']\n row += [*val['Calign25'][0:2], '{0:.2f}'.format(val['Calign25'][2]), *val['Calign25'][3:5],\n '{0:.2f}'.format(val['Calign25'][5])]\n row += ['']\n row += [*val['Calign50'][0:2], '{0:.2f}'.format(val['Calign50'][2]), *val['Calign50'][3:5],\n '{0:.2f}'.format(val['Calign50'][5])]\n f.write(' & '.join([str(x) for x in row]) + '\\\\\\\\\\n')\n\n\ndef write_assembly_stats2(statsdict: Dict) -> None:\n with open('/home/andreas/GDrive/workspace/sparsedata/assembly_stats2.csv', 'w') as f:\n f_csv = csv.writer(f, delimiter=',')\n refs = [ref1_name, ref2_name]\n f_csv.writerow(range(len(refs) * 9))\n\n f_csv.writerow(\n [stats_dict[(ref_name, c, l)]['Actual'][0] for ref_name in refs for c in\n coverages for l in average_length_list])\n f_csv.writerow(\n [stats_dict[(ref_name, c, l)]['Actual'][1] for ref_name in refs for c in\n coverages for l\n in average_length_list])\n f_csv.writerow(\n [stats_dict[(ref_name, c, l)]['Actual'][2] for ref_name in refs for c in\n coverages for l\n in average_length_list])\n for foo in ['Calign', 'Calign25', 'Calign50']:\n for i in range(6):\n if i in [2, 5]:\n f_csv.writerow(\n ['{0:.2f}'.format(stats_dict[(ref_name, c, l)][foo][i]) for ref_name in refs for c in\n coverages\n for l in average_length_list])\n else:\n f_csv.writerow(\n [stats_dict[(ref_name, c, l)][foo][i] for ref_name in refs for c in\n coverages\n for l in average_length_list])\n\n\nassembly_stats_list = []\nstats_dict = {}\n# for dir in sorted(glob.glob('/home/andreas/GDrive/workspace/sparsedata/ref[1,2,3]_c[5,20,40]*/')):\nfor ref_number in [1, 2, 3]:\n for coverage in coverages:\n for length in average_length_list:\n # file_sub_dir = dir.split('/')[-2] # example ref1_c5_l100\n # ref_number = int(file_sub_dir.split('ref')[1].split('_')[0])\n ref_name = references[ref_number - 1]\n # coverage = int(file_sub_dir.split('_c')[1].split('_')[0])\n # length = int(file_sub_dir.split('_l')[1])\n dir = '/home/andreas/GDrive/workspace/sparsedata/ref{}_c{}_l{}/'.format(ref_number, coverage, length)\n stats_dict[(ref_name, coverage, length)] = {'Actual': read_fasta_stats_file(dir + 'fasta.stat'),\n 'Calign': read_assembly_file(dir + 'calign.assembly'),\n 'Calign25': read_assembly_file(\n dir + 'calign_0_{}.assembly'.format(length // 4)),\n 'Calign50': read_assembly_file(\n dir + 'calign_0_{}.assembly'.format(length // 2))}\n\n\n # dir = '{}-{}-{}'.format(references[ref_number - 1], coverage, length)\n # assembly_stats_list.append(\n # Assembly_Stats(dir, len(lkh_contigs), lkh_value, lkh_time, len(ap_contigs), ap_value, ap_time,\n # actual_Objective_value))\n\n\ndef write_whole_stats() -> None:\n headers = ['CalignLKH', 'CalignAP', 'CalignALKH', 'CalignAAP', 'CalignBLKH',\n 'CalignBAP']\n vals = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,\n 'CalignBAP': 0}\n gaps = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,\n 'CalignBAP': 0}\n both = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,\n 'CalignBAP': 0}\n atspvsapval = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 'CalignBLKH': 0,\n 'CalignBAP': 0}\n atspvsap = {'CalignLKH': 0, 'CalignAP': 0, 'CalignALKH': 0, 'CalignAAP': 0, 
'CalignBLKH': 0,\n 'CalignBAP': 0}\n with open(DIR + 'assembly_stats.csv', 'r') as f:\n f_csv = csv.DictReader(f, delimiter=',')\n for row in f_csv:\n for elem in headers:\n if row['ActualValue'] == row[elem + 'Value']:\n vals[elem] += 1\n if row['ActualGaps'] == row[elem + 'Gaps']:\n gaps[elem] += 1\n if row['ActualValue'] == row[elem + 'Value'] and row['ActualGaps'] == row[elem + 'Gaps']:\n both[elem] += 1\n if row['CalignLKHValue'] == row['CalignAPValue']:\n atspvsapval['CalignLKH'] += 1\n atspvsapval['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue']:\n atspvsapval['CalignALKH'] += 1\n atspvsapval['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue']:\n atspvsapval['CalignBLKH'] += 1\n atspvsapval['CalignBAP'] += 1\n if row['CalignLKHValue'] == row['CalignAPValue'] and row['CalignLKHGaps'] == row['CalignAPGaps']:\n atspvsap['CalignLKH'] += 1\n atspvsap['CalignAP'] += 1\n if row['CalignALKHValue'] == row['CalignAAPValue'] and row['CalignALKHGaps'] == row['CalignAAPGaps']:\n atspvsap['CalignALKH'] += 1\n atspvsap['CalignAAP'] += 1\n if row['CalignBLKHValue'] == row['CalignBAPValue'] and row['CalignBLKHGaps'] == row['CalignBAPGaps']:\n atspvsap['CalignBLKH'] += 1\n atspvsap['CalignBAP'] += 1\n with open(DIR + 'complete_stats.csv', 'w') as g:\n g_csv = csv.DictWriter(g, delimiter='&', fieldnames=headers)\n g_csv.writeheader()\n g_csv.writerow(vals)\n g_csv.writerow(gaps)\n g_csv.writerow(both)\n g_csv.writerow(atspvsapval)\n g_csv.writerow(atspvsap)\n\n\nwrite_assembly_stats(stats_dict)\nwrite_assembly_stats2(stats_dict)\nwrite_assembly_stats_tex(stats_dict)\nwrite_whole_stats()\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
#
# tests/middleware/test_static.py
#
import pytest
import growler
from pathlib import Path
from unittest import mock
from sys import version_info
from growler.middleware.static import Static
@pytest.fixture
def static(tmpdir):
return Static(str(tmpdir))
def test_static_fixture(static, tmpdir):
assert isinstance(static, Static)
assert str(static.path) == str(tmpdir)
def test_construct_with_list(tmpdir):
s = Static(['/'] + str(tmpdir).split('/'))
assert str(s.path) == str(tmpdir)
def test_error_on_missing_dir():
err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError
with pytest.raises(err):
Static("/does/not/exist")
def test_static_construct_requires_directory(tmpdir):
name = "foo"
foo = tmpdir / name
foo.write('')
with pytest.raises(NotADirectoryError):
Static(str(foo))
def test_call(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
    file_contents = b'This is some text in the file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
static(req, res)
res.set_type.assert_called_with('text/plain')
res.send_file.assert_called_with(file_path)
def test_call_invalid_path(static):
req, res = mock.Mock(), mock.Mock()
req.path = '/foo/../bar'
static(req, res)
assert not res.set_type.called
assert not res.send_file.called
assert not res.end.called
def test_call_with_etag(static, tmpdir):
req, res = mock.MagicMock(), mock.MagicMock()
    file_contents = b'This is some text in the file'
f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'
f.write(file_contents)
file_path = Path(str(f))
etag = static.calculate_etag(file_path)
req.path = '/foo/bar/file.txt'
req.headers = {'IF-NONE-MATCH': etag}
static(req, res)
assert res.status_code == 304
assert not res.set_type.called
assert not res.send_file.called
|
normal
|
{
"blob_id": "9a7994a1e51c9cf7fe7d8b50ab26fa3d789fc8e5",
"index": 1012,
"step-1": "<mask token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\n<mask token>\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\n<mask token>\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"step-3": "<mask token>\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"step-4": "import pytest\nimport growler\nfrom pathlib import Path\nfrom unittest import mock\nfrom sys import version_info\nfrom growler.middleware.static import Static\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static('/does/not/exist')\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = 'foo'\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n static(req, res)\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n req.path = '/foo/../bar'\n static(req, res)\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n file_contents = b'This is some text in teh file'\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n etag = static.calculate_etag(file_path)\n req.path = '/foo/bar/file.txt'\n req.headers = {'IF-NONE-MATCH': etag}\n static(req, res)\n assert res.status_code == 304\n assert not res.set_type.called\n assert not res.send_file.called\n",
"step-5": "#\n# tests/middleware/test_static.py\n#\n\nimport pytest\nimport growler\nfrom pathlib import Path\nfrom unittest import mock\nfrom sys import version_info\nfrom growler.middleware.static import Static\n\n\[email protected]\ndef static(tmpdir):\n return Static(str(tmpdir))\n\n\ndef test_static_fixture(static, tmpdir):\n assert isinstance(static, Static)\n assert str(static.path) == str(tmpdir)\n\n\ndef test_construct_with_list(tmpdir):\n s = Static(['/'] + str(tmpdir).split('/'))\n assert str(s.path) == str(tmpdir)\n\n\ndef test_error_on_missing_dir():\n err = FileNotFoundError if version_info < (3, 6) else NotADirectoryError\n with pytest.raises(err):\n Static(\"/does/not/exist\")\n\n\ndef test_static_construct_requires_directory(tmpdir):\n name = \"foo\"\n foo = tmpdir / name\n foo.write('')\n with pytest.raises(NotADirectoryError):\n Static(str(foo))\n\n\ndef test_call(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n\n file_contents = b'This is some text in teh file'\n\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n\n file_path = Path(str(f))\n\n etag = static.calculate_etag(file_path)\n\n req.path = '/foo/bar/file.txt'\n\n static(req, res)\n\n res.set_type.assert_called_with('text/plain')\n res.send_file.assert_called_with(file_path)\n\n\ndef test_call_invalid_path(static):\n req, res = mock.Mock(), mock.Mock()\n\n req.path = '/foo/../bar'\n static(req, res)\n\n assert not res.set_type.called\n assert not res.send_file.called\n assert not res.end.called\n\n\ndef test_call_with_etag(static, tmpdir):\n req, res = mock.MagicMock(), mock.MagicMock()\n\n file_contents = b'This is some text in teh file'\n\n f = tmpdir.mkdir('foo').mkdir('bar') / 'file.txt'\n f.write(file_contents)\n file_path = Path(str(f))\n\n etag = static.calculate_etag(file_path)\n\n req.path = '/foo/bar/file.txt'\n\n req.headers = {'IF-NONE-MATCH': etag}\n\n static(req, res)\n\n assert res.status_code == 304\n\n assert not res.set_type.called\n assert not res.send_file.called\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
# Input/Output test
"""
Date  : 2021/04/27
Name  : 이지영
Topic : Python standard input/output practice (textbook p42)
"""
# Python standard output
print('hello', end='!')  # print: output function (comparable to JavaScript's document.write('hello');)
print('python')
print('010', '1234', '1111', sep='-')  # sep sets the separator between values
# Python standard input
num = input('숫자입력 : ')
print('입력한 숫자 :', num)
print('num type :', type(num))
# The input string must be converted to a number: input() always returns <class 'str'>
result = int(num)
print('result :', result)
print('result type :', type(result))
# Formatted output with % specifiers
print('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화'))  # %s stands for a string value
# Formatted output with str.format()
print('이름 : {}, 나이 : {}, 주소 : {}'.format('김유신', 23, '김해시'))
|
normal
|
{
"blob_id": "cc628270a973866025a5e2a5d07e39b4dbdcd324",
"index": 1718,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('hello', end='!')\nprint('python')\nprint('010', '1234', '1111', sep='-')\n<mask token>\nprint('입력한 숫자 :', num)\nprint('num type :', type(num))\n<mask token>\nprint('result :', result)\nprint('result type :', type(result))\nprint('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화'))\nprint('이름 : {}, 나이 : {}, 주소 : {}'.format('김유신', 23, '김해시'))\n",
"step-3": "<mask token>\nprint('hello', end='!')\nprint('python')\nprint('010', '1234', '1111', sep='-')\nnum = input('숫자입력 : ')\nprint('입력한 숫자 :', num)\nprint('num type :', type(num))\nresult = int(num)\nprint('result :', result)\nprint('result type :', type(result))\nprint('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화'))\nprint('이름 : {}, 나이 : {}, 주소 : {}'.format('김유신', 23, '김해시'))\n",
"step-4": "# Input Output test (입출력 테스트 )\n\"\"\"\n날짜 : 2021/04/27\n이름 : 이지영\n내용 : 파이썬 표준입출력 실습 _ 교재 p42\n\"\"\"\n\n# 파이썬 표준 출력\nprint('hello', end='!') #print : 출력함수 (자바에선 document.write('hello');)\nprint('python')\n\nprint('010', '1234', '1111', sep='-') # seperate 값\n\n# 파이썬 표준 입력\nnum = input('숫자입력 : ')\n\nprint('입력한 숫자 :', num)\nprint('num type :', type(num))\n\n# 입력받은 문자열을 숫자로 변환하는 작업이 필요함. <class 'str'> 문자열로 읽히기 때문\nresult = int(num)\nprint('result :', result)\nprint('result type :', type(result))\n\n\n# 서식문자 출력\nprint('%d년 %d월 %d일 %s요일' % (2021, 4, 27, '화')) # %s: string 문자열을 나타냄\n\n# 포맷문자 출력\nprint('이름 : {}, 나이 : {}, 주소 : {}' .format('김유신', 23, '김해시'))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.shortcuts import render
from django.shortcuts import redirect
from block.models import Block
from .models import Article
from .forms import ArticleForm
from django.core.paginator import Paginator
from django.contrib.auth.decorators import login_required
def article_list(request, block_id):
block_id = int(block_id)
block = Block.objects.get(id=block_id)
all_articles = Article.objects.filter(block=block, article_status=0).order_by("-id")
ARTICLE_CNT_1PAGE = 2
p = Paginator(all_articles, ARTICLE_CNT_1PAGE)
page_no = int(request.GET.get("page_no", "1"))
page = p.page(page_no)
articles_objs = page.object_list
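    # Page-link window: two pages either side of the current page, clamped to [1, num_pages].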
page_links = [i
for i in range(page_no - 2, page_no + 3) if i > 0 and i <= p.num_pages]
return render(request, "article_list.html",
{"articles": articles_objs, "b": block, "page_no": page_no, "page": page,
"page_links": page_links, "p": p})
@login_required
def article_create(request, block_id):
block_id = int(block_id)
block = Block.objects.get(id=block_id)
if request.method == "GET":
return render(request, "article_create.html", {"b": block})
else:
form = ArticleForm(request.POST)
if form.is_valid():
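            # commit=False lets us set owner, block and status before the first save.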
article = form.save(commit=False)
article.owner = request.user
article.block = block
article.article_status = 0
article.save()
return redirect("/article/list/%s" % block_id)
else:
return render(request, "article_create.html", {"b": block, "form": form})
def article_detail(request, article_id):
article = Article.objects.get(id=article_id)
return render(request, "article_detail.html", {"article": article})
|
normal
|
{
"blob_id": "0f94537fa64066bb29c5e9e97836b0a8ac01ac19",
"index": 9844,
"step-1": "<mask token>\n\n\n@login_required\ndef article_create(request, block_id):\n block_id = int(block_id)\n block = Block.objects.get(id=block_id)\n if request.method == 'GET':\n return render(request, 'article_create.html', {'b': block})\n else:\n form = ArticleForm(request.POST)\n if form.is_valid():\n article = form.save(commit=False)\n article.owner = request.user\n article.block = block\n article.article_status = 0\n article.save()\n return redirect('/article/list/%s' % block_id)\n else:\n return render(request, 'article_create.html', {'b': block,\n 'form': form})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@login_required\ndef article_create(request, block_id):\n block_id = int(block_id)\n block = Block.objects.get(id=block_id)\n if request.method == 'GET':\n return render(request, 'article_create.html', {'b': block})\n else:\n form = ArticleForm(request.POST)\n if form.is_valid():\n article = form.save(commit=False)\n article.owner = request.user\n article.block = block\n article.article_status = 0\n article.save()\n return redirect('/article/list/%s' % block_id)\n else:\n return render(request, 'article_create.html', {'b': block,\n 'form': form})\n\n\ndef article_detail(request, article_id):\n article = Article.objects.get(id=article_id)\n return render(request, 'article_detail.html', {'article': article})\n",
"step-3": "<mask token>\n\n\ndef article_list(request, block_id):\n block_id = int(block_id)\n block = Block.objects.get(id=block_id)\n all_articles = Article.objects.filter(block=block, article_status=0\n ).order_by('-id')\n ARTICLE_CNT_1PAGE = 2\n p = Paginator(all_articles, ARTICLE_CNT_1PAGE)\n page_no = int(request.GET.get('page_no', '1'))\n page = p.page(page_no)\n articles_objs = page.object_list\n page_links = [i for i in range(page_no - 2, page_no + 3) if i > 0 and i <=\n p.num_pages]\n return render(request, 'article_list.html', {'articles': articles_objs,\n 'b': block, 'page_no': page_no, 'page': page, 'page_links':\n page_links, 'p': p})\n\n\n@login_required\ndef article_create(request, block_id):\n block_id = int(block_id)\n block = Block.objects.get(id=block_id)\n if request.method == 'GET':\n return render(request, 'article_create.html', {'b': block})\n else:\n form = ArticleForm(request.POST)\n if form.is_valid():\n article = form.save(commit=False)\n article.owner = request.user\n article.block = block\n article.article_status = 0\n article.save()\n return redirect('/article/list/%s' % block_id)\n else:\n return render(request, 'article_create.html', {'b': block,\n 'form': form})\n\n\ndef article_detail(request, article_id):\n article = Article.objects.get(id=article_id)\n return render(request, 'article_detail.html', {'article': article})\n",
"step-4": "from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom block.models import Block\nfrom .models import Article\nfrom .forms import ArticleForm\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\n\n\ndef article_list(request, block_id):\n block_id = int(block_id)\n block = Block.objects.get(id=block_id)\n all_articles = Article.objects.filter(block=block, article_status=0\n ).order_by('-id')\n ARTICLE_CNT_1PAGE = 2\n p = Paginator(all_articles, ARTICLE_CNT_1PAGE)\n page_no = int(request.GET.get('page_no', '1'))\n page = p.page(page_no)\n articles_objs = page.object_list\n page_links = [i for i in range(page_no - 2, page_no + 3) if i > 0 and i <=\n p.num_pages]\n return render(request, 'article_list.html', {'articles': articles_objs,\n 'b': block, 'page_no': page_no, 'page': page, 'page_links':\n page_links, 'p': p})\n\n\n@login_required\ndef article_create(request, block_id):\n block_id = int(block_id)\n block = Block.objects.get(id=block_id)\n if request.method == 'GET':\n return render(request, 'article_create.html', {'b': block})\n else:\n form = ArticleForm(request.POST)\n if form.is_valid():\n article = form.save(commit=False)\n article.owner = request.user\n article.block = block\n article.article_status = 0\n article.save()\n return redirect('/article/list/%s' % block_id)\n else:\n return render(request, 'article_create.html', {'b': block,\n 'form': form})\n\n\ndef article_detail(request, article_id):\n article = Article.objects.get(id=article_id)\n return render(request, 'article_detail.html', {'article': article})\n",
"step-5": "from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom block.models import Block\nfrom .models import Article\nfrom .forms import ArticleForm\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth.decorators import login_required\n\n\ndef article_list(request, block_id):\n block_id = int(block_id)\n block = Block.objects.get(id=block_id)\n\n all_articles = Article.objects.filter(block=block, article_status=0).order_by(\"-id\")\n ARTICLE_CNT_1PAGE = 2\n p = Paginator(all_articles, ARTICLE_CNT_1PAGE)\n page_no = int(request.GET.get(\"page_no\", \"1\"))\n page = p.page(page_no)\n articles_objs = page.object_list\n\n page_links = [i\n for i in range(page_no - 2, page_no + 3) if i > 0 and i <= p.num_pages]\n\n return render(request, \"article_list.html\",\n {\"articles\": articles_objs, \"b\": block, \"page_no\": page_no, \"page\": page,\n \"page_links\": page_links, \"p\": p})\n\n@login_required\ndef article_create(request, block_id):\n block_id = int(block_id)\n block = Block.objects.get(id=block_id)\n if request.method == \"GET\":\n return render(request, \"article_create.html\", {\"b\": block})\n else:\n form = ArticleForm(request.POST)\n if form.is_valid():\n article = form.save(commit=False)\n article.owner = request.user\n article.block = block\n article.article_status = 0\n article.save()\n return redirect(\"/article/list/%s\" % block_id)\n else:\n return render(request, \"article_create.html\", {\"b\": block, \"form\": form})\n\n\ndef article_detail(request, article_id):\n article = Article.objects.get(id=article_id)\n return render(request, \"article_detail.html\", {\"article\": article})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GroupKFold
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_log_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import Lasso
def test_lasso():
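    # Load and preprocess the test split into the design matrix expected by the trained Lasso model.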
test = pd.read_csv('./data/test.csv')
building_metadata = pd.read_csv('./data/building_metadata.csv')
weather_test = pd.read_csv('./data/weather_test.csv')
# Sort data for future imputation
test.sort_values(by=['building_id','timestamp'], inplace=True)
# Merging data
test = (test
.merge(building_metadata, on = 'building_id', how='left')
.merge(weather_test, on = ['site_id','timestamp'], how='left'))
del building_metadata
del weather_test
    # Add date variables
    test['timestamp'] = pd.to_datetime(test['timestamp'])
    test['hour'] = test.timestamp.dt.hour
    test['wday'] = test.timestamp.dt.dayofweek
    test['week'] = test.timestamp.dt.isocalendar().week  # dt.weekofyear was removed in pandas 2.0
    # Eliminate problematic variables
    test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',
               'site_id', 'primary_use', 'wind_direction', 'square_feet',
               'dew_temperature', 'sea_level_pressure', 'wind_speed',
               'precip_depth_1_hr'], inplace=True, axis=1)
# Imputation
test = test.interpolate()
    for h in range(22):  # drop hours 0-21; keep only hours 22 and 23
        test.drop(test[test.hour == h].index, inplace=True)
# One Hot Encoding
    encode = OneHotEncoder(categories='auto', drop='first')
catego_var = test.loc[:,['building_id','meter']].to_numpy()
catego_var = encode.fit_transform(catego_var).toarray()
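    # drop='first' removes one dummy per feature, so the names skip the first building id and meter 0.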
encode_names = test.building_id.unique().tolist()[1:] + ['meter_1','meter_2','meter_3']
encode_var = pd.DataFrame(catego_var, columns = encode_names)
test.drop('meter', inplace=True, axis = 1)
test.reset_index(drop=True,inplace=True)
test = test.join(encode_var)
    # Use row_id as the index
test.set_index('row_id', inplace=True)
return test
#X_train, y_train = train_lasso()
#mod_lasso = Lasso()
#mod_lasso.fit(X_train, y_train)
#print(mod_lasso.coef_)
from joblib import dump, load
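# Load the Lasso model fitted offline by the (commented-out) training block above.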
mod_lasso = load('mod_lasso.joblib')
X_test = test_lasso()
y_pred = mod_lasso.predict(X_test)
print(X_test.head())
sub = pd.DataFrame(np.maximum(0,y_pred), index = X_test.index, columns = ['meter_reading'])
sub.sort_values(by = 'row_id', inplace = True)
sub.to_csv('./submission12.csv')
|
normal
|
{
"blob_id": "6028b46eab422dea02af24e9cf724fe0d8b3ecc4",
"index": 9531,
"step-1": "<mask token>\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\n<mask token>\nprint(X_test.head())\n<mask token>\nsub.sort_values(by='row_id', inplace=True)\nsub.to_csv('./submission12.csv')\n",
"step-3": "<mask token>\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\n<mask token>\nmod_lasso = load('mod_lasso.joblib')\nX_test = test_lasso()\ny_pred = mod_lasso.predict(X_test)\nprint(X_test.head())\nsub = pd.DataFrame(np.maximum(0, y_pred), index=X_test.index, columns=[\n 'meter_reading'])\nsub.sort_values(by='row_id', inplace=True)\nsub.to_csv('./submission12.csv')\n",
"step-4": "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.linear_model import Lasso\n\n\ndef test_lasso():\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n test.sort_values(by=['building_id', 'timestamp'], inplace=True)\n test = test.merge(building_metadata, on='building_id', how='left').merge(\n weather_test, on=['site_id', 'timestamp'], how='left')\n del building_metadata\n del weather_test\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n test.drop(['timestamp', 'year_built', 'floor_count', 'cloud_coverage',\n 'site_id', 'primary_use', 'wind_direction', 'square_feet',\n 'dew_temperature', 'sea_level_pressure', 'wind_speed',\n 'precip_depth_1_hr'], inplace=True, axis=1)\n test = test.interpolate()\n test.drop(test[test.hour == 0].index, inplace=True)\n test.drop(test[test.hour == 1].index, inplace=True)\n test.drop(test[test.hour == 2].index, inplace=True)\n test.drop(test[test.hour == 3].index, inplace=True)\n test.drop(test[test.hour == 4].index, inplace=True)\n test.drop(test[test.hour == 5].index, inplace=True)\n test.drop(test[test.hour == 6].index, inplace=True)\n test.drop(test[test.hour == 7].index, inplace=True)\n test.drop(test[test.hour == 8].index, inplace=True)\n test.drop(test[test.hour == 9].index, inplace=True)\n test.drop(test[test.hour == 10].index, inplace=True)\n test.drop(test[test.hour == 11].index, inplace=True)\n test.drop(test[test.hour == 12].index, inplace=True)\n test.drop(test[test.hour == 13].index, inplace=True)\n test.drop(test[test.hour == 14].index, inplace=True)\n test.drop(test[test.hour == 15].index, inplace=True)\n test.drop(test[test.hour == 16].index, inplace=True)\n test.drop(test[test.hour == 17].index, inplace=True)\n test.drop(test[test.hour == 18].index, inplace=True)\n test.drop(test[test.hour == 19].index, inplace=True)\n test.drop(test[test.hour == 20].index, inplace=True)\n test.drop(test[test.hour == 21].index, inplace=True)\n encode = OneHotEncoder(categories='auto', drop='first')\n catego_var = test.loc[:, ['building_id', 'meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1',\n 'meter_2', 'meter_3']\n encode_var = pd.DataFrame(catego_var, columns=encode_names)\n test.drop('meter', inplace=True, axis=1)\n test.reset_index(drop=True, inplace=True)\n test = test.join(encode_var)\n test.set_index('row_id', inplace=True)\n return test\n\n\nfrom joblib import dump, load\nmod_lasso = load('mod_lasso.joblib')\nX_test = test_lasso()\ny_pred = mod_lasso.predict(X_test)\nprint(X_test.head())\nsub = pd.DataFrame(np.maximum(0, y_pred), index=X_test.index, columns=[\n 'meter_reading'])\nsub.sort_values(by='row_id', inplace=True)\nsub.to_csv('./submission12.csv')\n",
"step-5": "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_log_error\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.linear_model import Lasso\n\n\ndef test_lasso():\n\n test = pd.read_csv('./data/test.csv')\n building_metadata = pd.read_csv('./data/building_metadata.csv')\n weather_test = pd.read_csv('./data/weather_test.csv')\n\n # Sort data for future imputation\n test.sort_values(by=['building_id','timestamp'], inplace=True)\n\n # Merging data\n test = (test\n .merge(building_metadata, on = 'building_id', how='left')\n .merge(weather_test, on = ['site_id','timestamp'], how='left'))\n\n del building_metadata\n del weather_test\n\n #Add dates variables\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n test['hour'] = test.timestamp.dt.hour\n test['wday'] = test.timestamp.dt.dayofweek\n test['week'] = test.timestamp.dt.weekofyear\n\n #Eliminate problematic variables\n test.drop(['timestamp','year_built','floor_count','cloud_coverage','site_id','primary_use','wind_direction','square_feet','dew_temperature','sea_level_pressure','wind_speed','precip_depth_1_hr'], inplace=True, axis = 1)\n\n # Imputation\n test = test.interpolate()\n test.drop(test[test.hour==0].index, inplace=True)\n test.drop(test[test.hour==1].index, inplace=True)\n test.drop(test[test.hour==2].index, inplace=True)\n test.drop(test[test.hour==3].index, inplace=True)\n test.drop(test[test.hour==4].index, inplace=True)\n test.drop(test[test.hour==5].index, inplace=True)\n test.drop(test[test.hour==6].index, inplace=True)\n test.drop(test[test.hour==7].index, inplace=True)\n test.drop(test[test.hour==8].index, inplace=True)\n test.drop(test[test.hour==9].index, inplace=True)\n test.drop(test[test.hour==10].index, inplace=True)\n test.drop(test[test.hour==11].index, inplace=True)\n test.drop(test[test.hour==12].index, inplace=True)\n test.drop(test[test.hour==13].index, inplace=True)\n test.drop(test[test.hour==14].index, inplace=True)\n test.drop(test[test.hour==15].index, inplace=True)\n test.drop(test[test.hour==16].index, inplace=True)\n test.drop(test[test.hour==17].index, inplace=True)\n test.drop(test[test.hour==18].index, inplace=True)\n test.drop(test[test.hour==19].index, inplace=True)\n test.drop(test[test.hour==20].index, inplace=True)\n test.drop(test[test.hour==21].index, inplace=True)\n\n # One Hot Encoding\n\n encode = OneHotEncoder(categories='auto',drop = 'first')\n catego_var = test.loc[:,['building_id','meter']].to_numpy()\n catego_var = encode.fit_transform(catego_var).toarray()\n encode_names = test.building_id.unique().tolist()[1:] + ['meter_1','meter_2','meter_3']\n encode_var = pd.DataFrame(catego_var, columns = encode_names)\n\n test.drop('meter', inplace=True, axis = 1)\n test.reset_index(drop=True,inplace=True)\n test = test.join(encode_var)\n\n # Add row as set_index\n test.set_index('row_id', inplace=True)\n\n return test\n\n\n\n#X_train, y_train = train_lasso()\n\n#mod_lasso = Lasso()\n#mod_lasso.fit(X_train, y_train)\n\n#print(mod_lasso.coef_)\nfrom joblib import dump, load\nmod_lasso = load('mod_lasso.joblib') \n\n\nX_test = test_lasso()\ny_pred = mod_lasso.predict(X_test)\nprint(X_test.head())\n\nsub = pd.DataFrame(np.maximum(0,y_pred), index = X_test.index, columns = ['meter_reading'])\nsub.sort_values(by = 'row_id', inplace = True)\nsub.to_csv('./submission12.csv')",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
##Extras
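# permissao(): returns True when the session user belongs to the "gerenciador" or "administrador" group.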
def permissao():
editor = False
for row in session.auth.user_groups:
grupo = session.auth.user_groups[row]
if (grupo == "gerenciador") or (grupo == "administrador"):
editor = True
return editor
|
normal
|
{
"blob_id": "70de2bed00aabe3805c3a19da004713d4109568a",
"index": 9036,
"step-1": "<mask token>\n",
"step-2": "def permissao():\n editor = False\n for row in session.auth.user_groups:\n grupo = session.auth.user_groups[row]\n if grupo == 'gerenciador' or grupo == 'administrador':\n editor = True\n return editor\n",
"step-3": "##Extras\n\ndef permissao():\n\teditor = False\n\tfor row in session.auth.user_groups:\n\t\tgrupo = session.auth.user_groups[row]\n\t\tif (grupo == \"gerenciador\") or (grupo == \"administrador\"):\n\t\t\teditor = True\n\treturn editor",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
import json
import random
chapter_mode = True
setname = 'test_other'
use_chapter = '_chapter'
minlen = 1000
maxlen = 1000
context = '_1000'
info_json = 'bookinfo{}_{}{}.json'.format(use_chapter, setname, context)
book_ID_mapping = {}
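# speaker_book.txt is pipe-delimited; fields 0, 1, 3 and 5 carry the ID, speaker, subset and book title.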
with open('speaker_book.txt') as fin:
for line in fin:
elems = line.split('|')
ID = elems[0].lstrip().strip()
speaker = elems[1].lstrip().strip()
subset = elems[3].lstrip().strip()
book = elems[5].lstrip().strip()
if (speaker, book) not in book_ID_mapping:
book_ID_mapping[(speaker, book)] = [ID]
else:
book_ID_mapping[(speaker, book)].append(ID)
with open(info_json) as fin:
spk_bookwords = json.load(fin)
worddict = set()
with open('../all_rare_words.txt') as fin:
for line in fin:
word = line.strip()
worddict.add(word)
worddict_full = {}
with open('word_freq.txt') as fin:
for line in fin:
word, freq = line.split()
worddict_full[word] = int(freq)
spk_book_KB = {}
KBfulllist = set()
for speaker, books in spk_bookwords.items():
# spk_book_KB[speaker] = {}
for book, content in books.items():
speaker_book_IDs = book_ID_mapping[(speaker, book)] if 'chapter' not in info_json else [speaker]
for speaker_book_ID in speaker_book_IDs:
spk_book_KB[speaker_book_ID] = []
bookwords = content['bookwords']
oovwords = content['oovwords']
            for word in (*bookwords, *oovwords):
                if word in worddict:
                    spk_book_KB[speaker_book_ID].append((word, worddict_full.get(word, 0)))
                    KBfulllist.add(word)
full_wordlist = list(KBfulllist)
output_path = 'LibriKB{}{}all_{}'.format(use_chapter[1:], context, maxlen)
os.system('mkdir -p {}'.format(output_path))
worddict = list(worddict)
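# Pad each KB with random rare words up to minlen, then keep the maxlen lowest-frequency entries.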
for ID, KB in spk_book_KB.items():
random.shuffle(worddict)
count = 0
while len(KB) < minlen and count < len(worddict):
word = worddict[count]
freq = worddict_full[word] if word in worddict_full else 0
if (word, freq) not in KB:
KB.append((word, freq))
count += 1
KB.sort(key=lambda tup: tup[1])
with open(os.path.join(output_path, ID), 'w') as fout:
for word, freq in KB[:maxlen]:
fout.write(word+'\n')
|
normal
|
{
"blob_id": "3b41bd59c133bb04dae3aa48dc0699388d5bf3d4",
"index": 8346,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('speaker_book.txt') as fin:\n for line in fin:\n elems = line.split('|')\n ID = elems[0].lstrip().strip()\n speaker = elems[1].lstrip().strip()\n subset = elems[3].lstrip().strip()\n book = elems[5].lstrip().strip()\n if (speaker, book) not in book_ID_mapping:\n book_ID_mapping[speaker, book] = [ID]\n else:\n book_ID_mapping[speaker, book].append(ID)\nwith open(info_json) as fin:\n spk_bookwords = json.load(fin)\n<mask token>\nwith open('../all_rare_words.txt') as fin:\n for line in fin:\n word = line.strip()\n worddict.add(word)\n<mask token>\nwith open('word_freq.txt') as fin:\n for line in fin:\n word, freq = line.split()\n worddict_full[word] = int(freq)\n<mask token>\nfor speaker, books in spk_bookwords.items():\n for book, content in books.items():\n speaker_book_IDs = book_ID_mapping[speaker, book\n ] if 'chapter' not in info_json else [speaker]\n for speaker_book_ID in speaker_book_IDs:\n spk_book_KB[speaker_book_ID] = []\n bookwords = content['bookwords']\n oovwords = content['oovwords']\n for word in bookwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\n for word in oovwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\n<mask token>\nos.system('mkdir -p {}'.format(output_path))\n<mask token>\nfor ID, KB in spk_book_KB.items():\n random.shuffle(worddict)\n count = 0\n while len(KB) < minlen and count < len(worddict):\n word = worddict[count]\n freq = worddict_full[word] if word in worddict_full else 0\n if (word, freq) not in KB:\n KB.append((word, freq))\n count += 1\n KB.sort(key=lambda tup: tup[1])\n with open(os.path.join(output_path, ID), 'w') as fout:\n for word, freq in KB[:maxlen]:\n fout.write(word + '\\n')\n",
"step-3": "<mask token>\nchapter_mode = True\nsetname = 'test_other'\nuse_chapter = '_chapter'\nminlen = 1000\nmaxlen = 1000\ncontext = '_1000'\ninfo_json = 'bookinfo{}_{}{}.json'.format(use_chapter, setname, context)\nbook_ID_mapping = {}\nwith open('speaker_book.txt') as fin:\n for line in fin:\n elems = line.split('|')\n ID = elems[0].lstrip().strip()\n speaker = elems[1].lstrip().strip()\n subset = elems[3].lstrip().strip()\n book = elems[5].lstrip().strip()\n if (speaker, book) not in book_ID_mapping:\n book_ID_mapping[speaker, book] = [ID]\n else:\n book_ID_mapping[speaker, book].append(ID)\nwith open(info_json) as fin:\n spk_bookwords = json.load(fin)\nworddict = set()\nwith open('../all_rare_words.txt') as fin:\n for line in fin:\n word = line.strip()\n worddict.add(word)\nworddict_full = {}\nwith open('word_freq.txt') as fin:\n for line in fin:\n word, freq = line.split()\n worddict_full[word] = int(freq)\nspk_book_KB = {}\nKBfulllist = set()\nfor speaker, books in spk_bookwords.items():\n for book, content in books.items():\n speaker_book_IDs = book_ID_mapping[speaker, book\n ] if 'chapter' not in info_json else [speaker]\n for speaker_book_ID in speaker_book_IDs:\n spk_book_KB[speaker_book_ID] = []\n bookwords = content['bookwords']\n oovwords = content['oovwords']\n for word in bookwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\n for word in oovwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\nfull_wordlist = list(KBfulllist)\noutput_path = 'LibriKB{}{}all_{}'.format(use_chapter[1:], context, maxlen)\nos.system('mkdir -p {}'.format(output_path))\nworddict = list(worddict)\nfor ID, KB in spk_book_KB.items():\n random.shuffle(worddict)\n count = 0\n while len(KB) < minlen and count < len(worddict):\n word = worddict[count]\n freq = worddict_full[word] if word in worddict_full else 0\n if (word, freq) not in KB:\n KB.append((word, freq))\n count += 1\n KB.sort(key=lambda tup: tup[1])\n with open(os.path.join(output_path, ID), 'w') as fout:\n for word, freq in KB[:maxlen]:\n fout.write(word + '\\n')\n",
"step-4": "import os\nimport json\nimport random\nchapter_mode = True\nsetname = 'test_other'\nuse_chapter = '_chapter'\nminlen = 1000\nmaxlen = 1000\ncontext = '_1000'\ninfo_json = 'bookinfo{}_{}{}.json'.format(use_chapter, setname, context)\nbook_ID_mapping = {}\nwith open('speaker_book.txt') as fin:\n for line in fin:\n elems = line.split('|')\n ID = elems[0].lstrip().strip()\n speaker = elems[1].lstrip().strip()\n subset = elems[3].lstrip().strip()\n book = elems[5].lstrip().strip()\n if (speaker, book) not in book_ID_mapping:\n book_ID_mapping[speaker, book] = [ID]\n else:\n book_ID_mapping[speaker, book].append(ID)\nwith open(info_json) as fin:\n spk_bookwords = json.load(fin)\nworddict = set()\nwith open('../all_rare_words.txt') as fin:\n for line in fin:\n word = line.strip()\n worddict.add(word)\nworddict_full = {}\nwith open('word_freq.txt') as fin:\n for line in fin:\n word, freq = line.split()\n worddict_full[word] = int(freq)\nspk_book_KB = {}\nKBfulllist = set()\nfor speaker, books in spk_bookwords.items():\n for book, content in books.items():\n speaker_book_IDs = book_ID_mapping[speaker, book\n ] if 'chapter' not in info_json else [speaker]\n for speaker_book_ID in speaker_book_IDs:\n spk_book_KB[speaker_book_ID] = []\n bookwords = content['bookwords']\n oovwords = content['oovwords']\n for word in bookwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\n for word in oovwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, \n worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\nfull_wordlist = list(KBfulllist)\noutput_path = 'LibriKB{}{}all_{}'.format(use_chapter[1:], context, maxlen)\nos.system('mkdir -p {}'.format(output_path))\nworddict = list(worddict)\nfor ID, KB in spk_book_KB.items():\n random.shuffle(worddict)\n count = 0\n while len(KB) < minlen and count < len(worddict):\n word = worddict[count]\n freq = worddict_full[word] if word in worddict_full else 0\n if (word, freq) not in KB:\n KB.append((word, freq))\n count += 1\n KB.sort(key=lambda tup: tup[1])\n with open(os.path.join(output_path, ID), 'w') as fout:\n for word, freq in KB[:maxlen]:\n fout.write(word + '\\n')\n",
"step-5": "import os\nimport json\nimport random\n\n\nchapter_mode = True\nsetname = 'test_other'\nuse_chapter = '_chapter'\nminlen = 1000\nmaxlen = 1000\ncontext = '_1000'\n\ninfo_json = 'bookinfo{}_{}{}.json'.format(use_chapter, setname, context)\nbook_ID_mapping = {}\nwith open('speaker_book.txt') as fin:\n for line in fin:\n elems = line.split('|')\n ID = elems[0].lstrip().strip()\n speaker = elems[1].lstrip().strip()\n subset = elems[3].lstrip().strip()\n book = elems[5].lstrip().strip()\n if (speaker, book) not in book_ID_mapping:\n book_ID_mapping[(speaker, book)] = [ID]\n else:\n book_ID_mapping[(speaker, book)].append(ID)\n\nwith open(info_json) as fin:\n spk_bookwords = json.load(fin)\n\nworddict = set()\nwith open('../all_rare_words.txt') as fin:\n for line in fin:\n word = line.strip()\n worddict.add(word)\n\nworddict_full = {}\nwith open('word_freq.txt') as fin:\n for line in fin:\n word, freq = line.split()\n worddict_full[word] = int(freq)\n\nspk_book_KB = {}\n\nKBfulllist = set()\n\nfor speaker, books in spk_bookwords.items():\n # spk_book_KB[speaker] = {}\n for book, content in books.items():\n speaker_book_IDs = book_ID_mapping[(speaker, book)] if 'chapter' not in info_json else [speaker]\n for speaker_book_ID in speaker_book_IDs:\n spk_book_KB[speaker_book_ID] = []\n bookwords = content['bookwords']\n oovwords = content['oovwords']\n for word in bookwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, worddict_full[word] if word in worddict_full else 0)) \n if word not in KBfulllist:\n KBfulllist.add(word)\n for word in oovwords:\n if word in worddict:\n spk_book_KB[speaker_book_ID].append((word, worddict_full[word] if word in worddict_full else 0))\n if word not in KBfulllist:\n KBfulllist.add(word)\n\nfull_wordlist = list(KBfulllist)\noutput_path = 'LibriKB{}{}all_{}'.format(use_chapter[1:], context, maxlen)\nos.system('mkdir -p {}'.format(output_path))\nworddict = list(worddict)\nfor ID, KB in spk_book_KB.items():\n random.shuffle(worddict)\n count = 0\n while len(KB) < minlen and count < len(worddict):\n word = worddict[count]\n freq = worddict_full[word] if word in worddict_full else 0\n if (word, freq) not in KB:\n KB.append((word, freq))\n count += 1\n KB.sort(key=lambda tup: tup[1])\n with open(os.path.join(output_path, ID), 'w') as fout:\n for word, freq in KB[:maxlen]:\n fout.write(word+'\\n')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
f_s_list = [2, 1.5, 1, 0.5, 0.2]
g_end_list = [500, 1000, 2000, 5000, 10000, 20000, 60000]
h_i_list = [(10000 * i, 10000 * (i + 1)) for i in range(6)]
i_seed_list = [1, 12, 123, 1234, 12345, 123456]
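# One-at-a-time sweeps over scale, training length, training window, seed and activation around the base run.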
for s in f_s_list:
os.system("python SKs_model.py " + str(s) + " 0 10000 0 relu")
for train_end in g_end_list:
os.system("python SKs_model.py 0.2 0 " + str(train_end) + " 0 relu")
for train_begin, train_end in h_i_list:
os.system("python SKs_model.py 0.2 " + str(train_begin) + " " + str(train_end) + " 0 relu")
for seed in i_seed_list:
os.system("python SKs_model.py 0.2 0 10000 " + str(seed) + " relu")
for activation in ["sigmoid", "relu"]:
os.system("python SKs_model.py 0.2 0 10000 0 " + activation)
|
normal
|
{
"blob_id": "56a681015ea27e2c8e00ab8bcc8019d5987c4ee1",
"index": 6949,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor s in f_s_list:\n os.system('python SKs_model.py ' + str(s) + ' 0 10000 0 relu')\nfor train_end in g_end_list:\n os.system('python SKs_model.py 0.2 0 ' + str(train_end) + ' 0 relu')\nfor train_begin, train_end in h_i_list:\n os.system('python SKs_model.py 0.2 ' + str(train_begin) + ' ' + str(\n train_end) + ' 0 relu')\nfor seed in i_seed_list:\n os.system('python SKs_model.py 0.2 0 10000 ' + str(seed) + ' relu')\nfor activation in ['sigmoid', 'relu']:\n os.system('python SKs_model.py 0.2 0 10000 0 ' + activation)\n",
"step-3": "<mask token>\nf_s_list = [2, 1.5, 1, 0.5, 0.2]\ng_end_list = [500, 1000, 2000, 5000, 10000, 20000, 60000]\nh_i_list = [(10000 * i, 10000 * (i + 1)) for i in range(6)]\ni_seed_list = [1, 12, 123, 1234, 12345, 123456]\nfor s in f_s_list:\n os.system('python SKs_model.py ' + str(s) + ' 0 10000 0 relu')\nfor train_end in g_end_list:\n os.system('python SKs_model.py 0.2 0 ' + str(train_end) + ' 0 relu')\nfor train_begin, train_end in h_i_list:\n os.system('python SKs_model.py 0.2 ' + str(train_begin) + ' ' + str(\n train_end) + ' 0 relu')\nfor seed in i_seed_list:\n os.system('python SKs_model.py 0.2 0 10000 ' + str(seed) + ' relu')\nfor activation in ['sigmoid', 'relu']:\n os.system('python SKs_model.py 0.2 0 10000 0 ' + activation)\n",
"step-4": "import os\nf_s_list = [2, 1.5, 1, 0.5, 0.2]\ng_end_list = [500, 1000, 2000, 5000, 10000, 20000, 60000]\nh_i_list = [(10000 * i, 10000 * (i + 1)) for i in range(6)]\ni_seed_list = [1, 12, 123, 1234, 12345, 123456]\nfor s in f_s_list:\n os.system('python SKs_model.py ' + str(s) + ' 0 10000 0 relu')\nfor train_end in g_end_list:\n os.system('python SKs_model.py 0.2 0 ' + str(train_end) + ' 0 relu')\nfor train_begin, train_end in h_i_list:\n os.system('python SKs_model.py 0.2 ' + str(train_begin) + ' ' + str(\n train_end) + ' 0 relu')\nfor seed in i_seed_list:\n os.system('python SKs_model.py 0.2 0 10000 ' + str(seed) + ' relu')\nfor activation in ['sigmoid', 'relu']:\n os.system('python SKs_model.py 0.2 0 10000 0 ' + activation)\n",
"step-5": "import os\n\nf_s_list = [2, 1.5, 1, 0.5, 0.2]\n\ng_end_list = [500, 1000, 2000, 5000, 10000, 20000, 60000]\n\nh_i_list = [(10000 * i, 10000 * (i + 1)) for i in range(6)]\n\ni_seed_list = [1, 12, 123, 1234, 12345, 123456]\n\nfor s in f_s_list:\n os.system(\"python SKs_model.py \" + str(s) + \" 0 10000 0 relu\")\n\nfor train_end in g_end_list:\n os.system(\"python SKs_model.py 0.2 0 \" + str(train_end) + \" 0 relu\")\n\nfor train_begin, train_end in h_i_list:\n os.system(\"python SKs_model.py 0.2 \" + str(train_begin) + \" \" + str(train_end) + \" 0 relu\")\n\nfor seed in i_seed_list:\n os.system(\"python SKs_model.py 0.2 0 10000 \" + str(seed) + \" relu\")\n\nfor activation in [\"sigmoid\", \"relu\"]:\n os.system(\"python SKs_model.py 0.2 0 10000 0 \" + activation)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import uuid
import json
import pytest
import requests
import httpx
from spinta.testing.manifest import bootstrap_manifest
from spinta.utils.data import take
from spinta.testing.utils import error
from spinta.testing.utils import get_error_codes, RowIds
from spinta.testing.context import create_test_context
from spinta.testing.client import create_test_client
from spinta.manifests.tabular.helpers import striptable
from spinta.testing.tabular import create_tabular_manifest
from spinta.testing.data import listdata
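# Three shared report fixtures: OK/STV/count=10, invalid/VMI/count=42, invalid/STV/count=13.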
test_data = [
{
'_type': 'report',
'status': 'OK',
'report_type': 'STV',
'count': 10,
'notes': [{
'note': 'hello',
'note_type': 'simple',
'create_date': '2019-03-14',
}],
'operating_licenses': [{
'license_types': ['valid', 'invalid'],
}],
},
{
'_type': 'report',
'status': 'invalid',
'report_type': 'VMI',
'count': 42,
'notes': [{
'note': 'world',
'note_type': 'daily',
'create_date': '2019-04-20',
}],
'operating_licenses': [{
'license_types': ['expired'],
}],
},
{
'_type': 'report',
'status': 'invalid',
'report_type': 'STV',
'count': 13,
'notes': [{
'note': 'foo bar',
'note_type': 'important',
'create_date': '2019-02-01',
}],
},
]
def _push_test_data(app, model, data=None):
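    # Batch-insert the given rows (defaults to test_data) and return the stored records.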
app.authmodel(model, ['insert'])
resp = app.post('/', json={'_data': [
{
**res,
'_op': 'insert',
'_type': model,
}
for res in data or test_data
]})
assert resp.status_code == 200, resp.json()
resp = resp.json()
assert '_data' in resp, resp
return resp['_data']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_exact(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
# single field search
resp = app.get(f'/{model}?status="OK"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_exact_lower(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?status.lower()="ok"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_exact_non_string(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
# single field search, non string type
resp = app.get(f'/{model}?count=13')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
    # single field search with a non-numeric value for an integer field
resp = app.get(f'/{model}?count="abc"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ["InvalidValue"]
    # a value that matches no resources
resp = app.get(f'/{model}?status="o"')
data = resp.json()['_data']
assert len(data) == 0
    # searching an unknown field is rejected
resp = app.get(f'/{model}?state="o"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ["FieldNotInResource"]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_exact_multiple_props(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?status.lower()="invalid"&report_type.lower()="stv"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_exact_same_prop_multiple_times(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?status.lower()="invalid"&status.lower()="ok"')
data = resp.json()['_data']
assert len(data) == 0
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_gt(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
# single field search
resp = app.get(f'/{model}?count>40')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
    # ordering comparisons are not allowed on string fields
resp = app.get(f'/{model}?status>"ok"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ["InvalidValue"]
# multi field search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?count>40&count>10')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# multi field and multi operator search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?count>40&report_type.lower()="vmi"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# test `greater_than` works as expected
resp = app.get(f'/{model}?count>42')
data = resp.json()['_data']
assert len(data) == 0
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_gt_with_nested_date(model, context, app):
ids = RowIds(_push_test_data(app, model))
app.authmodel(model, ['search'])
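    # recurse() searches create_date at any nesting depth (here inside notes).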
resp = app.get(f'/{model}?recurse(create_date)>"2019-04-19"')
assert ids(resp) == [1]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_gte(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
# single field search
resp = app.get(f'/{model}?count>=40')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
    # ordering comparisons are not allowed on string fields
resp = app.get(f'/{model}?status>="ok"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ["InvalidValue"]
# multi field search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?count>=40&count>10')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# multi field and multi operator search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?count>=40&report_type.lower()="vmi"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# test `greater_than` works as expected
resp = app.get(f'/{model}?count>=42')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_ge_with_nested_date(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(create_date)>="2019-04-20"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_lt(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
# single field search
resp = app.get(f'/{model}?count<12')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
    # ordering comparisons are not allowed on string fields
resp = app.get(f'/{model}?status<"ok"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ["InvalidValue"]
# multi field search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?count<20&count>10')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
# multi field and multi operator search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?count<50&report_type.lower()="vmi"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# test `lower_than` works as expected
resp = app.get(f'/{model}?count<10')
data = resp.json()['_data']
assert len(data) == 0
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_lt_with_nested_date(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(create_date)<"2019-02-02"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_lte(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
# single field search
resp = app.get(f'/{model}?count<=12')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
    # ordering comparisons are not allowed on string fields
resp = app.get(f'/{model}?status<="ok"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ["InvalidValue"]
# multi field search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?count<=20&count>10')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
# multi field and multi operator search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?count<=50&report_type.lower()="vmi"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# test `lower_than` works as expected
resp = app.get(f'/{model}?count<=10')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_le_with_nested_date(model, context, app):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(create_date)<="2019-02-01"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_ne(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
# single field search
resp = app.get(f'/{model}?status!="invalid"')
assert ids(resp) == [0]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_ne_lower(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
# single field search, case insensitive
resp = app.get(f'/{model}?status.lower()!="ok"')
assert ids(resp) == [1, 2]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_ne_multiple_props(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
# multi field search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?count!=10&count!=42')
assert ids(resp) == [2]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_ne_multiple_props_and_logic(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
# multi field and multi operator search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?status.lower()!="ok"&report_type.lower()="stv"')
assert ids(resp) == [2]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_ne_nested(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
# test `ne` with nested structure
resp = app.get(f'/{model}?notes.create_date!="2019-02-01"&status!="invalid"')
assert ids(resp) == [0]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_ne_nested_missing_data(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
    # test `ne` on nested structures where some resources lack the field entirely
resp = app.get(f'/{model}?operating_licenses.license_types!="valid"')
assert ids(resp) == [1]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_contains(model, context, app, mocker):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
# single field search
resp = app.get(f'/{model}?report_type.lower().contains("vm")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_contains_case_insensitive(model, context, app, mocker):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
# single field search, case insensitive
resp = app.get(f'/{model}?report_type.lower().contains("vm")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_contains_multi_field(model, context, app, mocker):
r1, r2, r3, = _push_test_data(app, model)
app.authmodel(model, ['search'])
# multi field search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?status.contains("valid")&report_type.lower().contains("tv")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
# test if operators are joined with AND logic
resp = app.get(f'/{model}?status.contains("valid")&report_type.contains("TV")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
# multi field search
# test if operators are joined with AND logic for same field
resp = app.get(f'/{model}?report_type.lower().contains("vm")&report_type.lower().contains("mi")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# multi field and multi operator search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?status.contains("valid")&report_type.lower()="vmi"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_contains_type_check(model, context, app):
    r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
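    # `contains()` only applies to string properties; using it on a date
    # must be rejected with 400 InvalidValue.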
resp = app.get(f'/{model}?recurse(create_date).contains("2019-04-20")')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ["InvalidValue"]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_contains_with_select(model, context, app, mocker):
    r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
# `contains` with select
resp = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
assert resp.status_code == 200
data = resp.json()['_data']
assert len(data) == 1
assert data[0] == {
'count': 42,
}
# `contains` with select and always_show_id
mocker.patch.object(context.get('config'), 'always_show_id', True)
resp = app.get(f'/{model}?report_type.lower().contains("vm")&select(count)')
assert resp.status_code == 200
data = resp.json()['_data']
assert len(data) == 1
assert data[0] == {
'_id': r2['_id'],
'count': 42,
}
# `contains` with always_show_id should return just id
resp = app.get(f'/{model}?report_type.lower().contains("vm")')
assert resp.status_code == 200
data = resp.json()['_data']
assert len(data) == 1
assert data[0] == {
'_id': r2['_id'],
}
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_select_unknown_property(model, context, app, mocker):
_push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?select(nothere)')
assert error(resp) == 'FieldNotInResource'
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_select_unknown_property_in_object(model, context, app, mocker):
_push_test_data(app, model)
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?select(notes.nothere)')
assert error(resp) == 'FieldNotInResource'
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_startswith(model, context, app):
    r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
# single field search
resp = app.get(f'/{model}?report_type.startswith("VM")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# single field search, case insensitive
resp = app.get(f'/{model}?report_type.lower().startswith("vm")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# multi field search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?status.startswith("in")&report_type.lower().startswith("vm")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
# multi field and multi operator search
# test if operators are joined with AND logic
resp = app.get(f'/{model}?report_type.lower().startswith("st")&status.lower()="ok"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
# sanity check that `startswith` searches from the start
resp = app.get(f'/{model}?status.startswith("valid")')
data = resp.json()['_data']
assert len(data) == 0
# `startswith` type check
resp = app.get(f'/{model}?notes.create_date.startswith("2019-04-20")')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ["InvalidValue"]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_nested(model, context, app):
    r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
# nested `exact` search
resp = app.get(f'/{model}?notes.note="foo bar"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
# nested `exact` search, case insensitive
resp = app.get(f'/{model}?notes.note.lower()="foo bar"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
# nested `exact` search with dates
resp = app.get(f'/{model}?notes.create_date="2019-03-14"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r1['_id']
# nested `gt` search
resp = app.get(f'/{model}?notes.create_date>"2019-04-01"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
    # nested nonexistent field
resp = app.get(f'/{model}?notes.foo.bar="baz"')
assert resp.status_code == 400
assert get_error_codes(resp.json()) == ["FieldNotInResource"]
# nested `contains` search
resp = app.get(f'/{model}?notes.note.contains("bar")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_nested_contains(model, context, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?operating_licenses.license_types.contains("lid")')
assert ids(resp) == [0]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_nested_startswith(model, context, app):
app.authmodel(model, ['search'])
    r1, r2, r3 = _push_test_data(app, model)
# nested `startswith` search
resp = app.get(f'/{model}?notes.note.startswith("fo")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
resp = app.get(f'/{model}?operating_licenses.license_types.startswith("exp")')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r2['_id']
def ids(resources):
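    """Extract `_id` values from pushed resources or from a search response."""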
if isinstance(resources, (requests.models.Response, httpx.Response)):
resp = resources
assert resp.status_code == 200, resp.json()
resources = resp.json()['_data']
return [r['_id'] for r in resources]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_or(model, context, app):
ids = RowIds(_push_test_data(app, model))
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?count=42|status.lower()="ok"')
assert ids(resp) == [0, 1]
resp = app.get(f'/{model}?count<=10|count=13')
assert ids(resp) == [0, 2]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_nested_recurse(model, context, app):
    r1, r2, r3 = _push_test_data(app, model)
app.authmodel(model, ['search'])
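    # `recurse(note)` finds the `note` property wherever it appears in the
    # nested structure (here inside `notes`).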
resp = app.get(f'/{model}?recurse(note)="foo bar"')
data = resp.json()['_data']
assert len(data) == 1
assert data[0]['_id'] == r3['_id']
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_nested_recurse_lower(model, context, app):
    r1, r2, r3 = ids(_push_test_data(app, model))
app.authmodel(model, ['search'])
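    # `.lower()` can be chained onto `recurse()` for case-insensitive matching.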
resp = app.get(f'/{model}?recurse(status).lower()="ok"')
assert ids(resp) == [r1]
@pytest.mark.models(
'backends/mongo/recurse',
'backends/postgres/recurse',
)
def test_search_nested_recurse_multiple_props(model, context, app):
    r1, r2 = ids(_push_test_data(app, model, [
{
'title': "Org",
'country': 'fi',
'govids': [
{'govid': '1', 'country': 'fi'},
{'govid': '2', 'country': 'se'},
]
},
{
'title': "Org",
'country': 'no',
'govids': [
{'govid': '3', 'country': 'no'},
]
},
]))
app.authmodel(model, ['search'])
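    # `country` exists both at the top level and inside `govids`, so
    # `recurse(country)` matches when either location holds the value.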
resp = app.get(f'/{model}?recurse(country)="se"')
assert ids(resp) == [r1]
resp = app.get(f'/{model}?recurse(country)="fi"')
assert ids(resp) == [r1]
resp = app.get(f'/{model}?recurse(country)="no"')
assert ids(resp) == [r2]
@pytest.mark.models(
'backends/mongo/recurse',
'backends/postgres/recurse',
)
def test_search_recurse_multiple_props_lower(model, app):
    r1, r2 = ids(_push_test_data(app, model, [
{
'title': "Org",
'country': 'fi',
'govids': [
{'govid': '1', 'country': 'FI'},
{'govid': '2', 'country': 'SE'},
]
},
{
'title': "Org",
'country': 'no',
'govids': [
{'govid': '3', 'country': 'NO'},
]
},
]))
app.authmodel(model, ['search'])
resp = app.get(f'/{model}?recurse(country).lower()="se"')
assert ids(resp) == [r1]
resp = app.get(f'/{model}?recurse(country).lower()="fi"')
assert ids(resp) == [r1]
resp = app.get(f'/{model}?recurse(country).lower()="no"')
assert ids(resp) == [r2]
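# `any("<op>", <prop>, <value>, ...)` matches rows where `<prop> <op> <value>`
# holds for at least one of the listed values, e.g. any("eq",count,10,42)
# behaves like count=10|count=42.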
# TODO: add mongo
def test_search_any(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("eq",count,10,42)')
assert ids(resp) == [0, 1]
resp = app.get(f'/{model}?any("ne",count,42)')
assert ids(resp) == [0, 2]
# TODO: add mongo
def test_search_any_in_list(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("eq",notes.note,"hello","world")')
assert sorted(ids(resp)) == [0, 1]
resp = app.get(f'/{model}?any("ne",notes.note,"foo bar")')
assert sorted(ids(resp)) == [0, 1]
# TODO: add mongo
def test_search_any_in_list_of_scalars(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("eq",operating_licenses.license_types,"valid","invalid","expired")')
assert sorted(ids(resp)) == [0, 1]
resp = app.get(f'/{model}?any("ne",operating_licenses.license_types,"expired")')
assert sorted(ids(resp)) == [0]
# TODO: add mongo
def test_search_any_recurse(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("eq",recurse(status),"OK","none")')
assert ids(resp) == [0]
# TODO: add mongo
def test_search_any_recurse_lower(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("eq",recurse(status).lower(),"ok","none")')
assert ids(resp) == [0]
# TODO: add mongo
def test_search_any_contains(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("contains",status,"inv","val","lid")')
assert sorted(ids(resp)) == [1, 2]
# TODO: add mongo
def test_search_any_contains_nested(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("contains",notes.note,"hel","wor")')
assert sorted(ids(resp)) == [0, 1]
# TODO: add mongo
def test_search_any_contains_recurse_lower(app):
model = 'backends/postgres/report'
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?any("contains",recurse(status).lower(),"o","k")')
assert sorted(ids(resp)) == [0]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_id_contains(model, app):
app.authmodel(model, ['search', 'getall'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?_id.contains("-")')
assert sorted(ids(resp)) == [0, 1, 2]
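    # take a short slice from the middle of the first record's UUID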
subid = ids[0][5:10]
resp = app.get(f'/{model}?_id.contains("{subid}")')
assert ids(resp) == [0]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_id_not_contains(model, app):
app.authmodel(model, ['search', 'getall'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?_id.contains("AAAAA")')
assert ids(resp) == []
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_id_startswith(model, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
subid = ids[0][:5]
resp = app.get(f'/{model}?_id.startswith("{subid}")')
assert ids(resp) == [0]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_id_not_startswith(model, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
subid = ids[0][5:10]
resp = app.get(f'/{model}?_id.startswith("{subid}")')
assert ids(resp) == []
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_revision_contains(model, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?_revision.contains("-")')
assert sorted(ids(resp)) == [0, 1, 2]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_revision_startswith(model, app):
app.authmodel(model, ['search', 'getone'])
ids = RowIds(_push_test_data(app, model))
id0 = ids[0]
resp = app.get(f'/{model}/{id0}')
revision = resp.json()['_revision'][:5]
resp = app.get(f'/{model}?_revision.startswith("{revision}")')
assert ids(resp) == [0]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_group(model, app):
app.authmodel(model, ['search', 'getone'])
ids = RowIds(_push_test_data(app, model))
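    # parentheses group multiple conditions into a single expression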
resp = app.get(f'/{model}?(report_type="STV"&status="OK")')
assert ids(resp) == [0]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_select_in_or(model, app):
app.authmodel(model, ['search', 'getone'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?(report_type="STV"|status="OK")&select(_id)')
    # XXX: Flaky test: sometimes it returns [2, 0] instead; cause unknown.
assert ids(resp) == [0, 2]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_lower_contains(model, app):
app.authmodel(model, ['search', 'getone'])
ids = RowIds(_push_test_data(app, model))
resp = app.get(f'/{model}?report_type.lower().contains("st")')
    # XXX: Flaky test: sometimes it returns [2, 0] instead; cause unknown.
assert ids(resp) == [0, 2]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_null(model, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model, [
{'status': 'OK'},
{},
]))
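    # `status=null` matches resources where the property was never set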
resp = app.get(f'/{model}?status=null')
assert ids(resp) == [1]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_search_not_null(model, app):
app.authmodel(model, ['search'])
ids = RowIds(_push_test_data(app, model, [
{'status': 'OK'},
{},
]))
resp = app.get(f'/{model}?status!=null')
assert ids(resp) == [0]
@pytest.mark.parametrize('backend', ['default', 'mongo'])
def test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):
rc = rc.fork({
'backends': [backend],
'manifests.default': {
'type': 'tabular',
'path': str(tmp_path / 'manifest.csv'),
'backend': backend,
},
})
    # Insert data into an extrafields model that has code and name properties.
create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
m | property | type
extrafields |
| code | string
| name | string
'''))
context = create_test_context(rc)
request.addfinalizer(context.wipe_all)
app = create_test_client(context)
app.authmodel('extrafields', ['insert'])
resp = app.post('/extrafields', json={'_data': [
{'_op': 'insert', 'code': 'lt', 'name': 'Lietuva'},
{'_op': 'insert', 'code': 'lv', 'name': 'Latvija'},
{'_op': 'insert', 'code': 'ee', 'name': 'Estija'},
]})
assert resp.status_code == 200, resp.json()
    # Now read from the same model, reloaded with only the name property.
create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
m | property | type
extrafields |
| name | string
'''))
context = create_test_context(rc)
app = create_test_client(context)
app.authmodel('extrafields', ['getall', 'getone'])
resp = app.get('/extrafields')
assert listdata(resp, sort=True) == [
"Estija",
"Latvija",
"Lietuva",
]
pk = resp.json()['_data'][0]['_id']
resp = app.get(f'/extrafields/{pk}')
data = resp.json()
assert resp.status_code == 200, data
assert take(data) == {'name': 'Lietuva'}
@pytest.mark.parametrize('backend', ['mongo'])
def test_missing_fields(postgresql, mongo, backend, rc, tmp_path):
rc = rc.fork({
'backends': [backend],
'manifests.default': {
'type': 'tabular',
'path': str(tmp_path / 'manifest.csv'),
'backend': backend,
},
})
    # Insert data into a missingfields model that has only a code property.
create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
m | property | type
missingfields |
| code | string
'''))
context = create_test_context(rc)
app = create_test_client(context)
app.authmodel('missingfields', ['insert'])
resp = app.post('/missingfields', json={'_data': [
{'_op': 'insert', 'code': 'lt'},
{'_op': 'insert', 'code': 'lv'},
{'_op': 'insert', 'code': 'ee'},
]})
assert resp.status_code == 200, resp.json()
    # Now read from the same model, reloaded with an additional name property.
create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''
m | property | type
missingfields |
| code | string
| name | string
'''))
context = create_test_context(rc)
app = create_test_client(context)
app.authmodel('missingfields', ['search', 'getone'])
resp = app.get('/missingfields?select(_id,code,name)')
assert listdata(resp, sort=True) == [
('ee', None),
('lt', None),
('lv', None),
]
pk = resp.json()['_data'][0]['_id']
resp = app.get(f'/missingfields/{pk}')
data = resp.json()
assert resp.status_code == 200, data
assert take(data) == {'code': 'lt'}
def test_base_select(rc, postgresql, request):
context = bootstrap_manifest(rc, '''
d | r | b | m | property | type | ref
datasets/gov/example/base | |
| |
| | | Location | |
| | | | id | integer |
| | | | name | string |
| | | | type | string |
| |
| | Location | |
| | | City | |
| | | | id | |
| | | | name | string |
| | | | population | integer |
''', backend=postgresql, request=request)
app = create_test_client(context)
app.authorize(['spinta_set_meta_fields'])
app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])
app.authmodel('datasets/gov/example/base/City', ['insert', 'delete', 'getall', 'search'])
_id = str(uuid.uuid4())
app.post('/datasets/gov/example/base/Location', json={
'_id': _id,
'id': 1,
'name': 'Base location',
'type': 'city'
})
app.post('/datasets/gov/example/base/City', json={
'_id': _id,
'name': 'City',
'population': 100
})
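    # Both records share the same `_id`, so the City row extends the base
    # Location row; `_base.name` and `_base.type` select the base properties.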
resp = app.get('/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)')
assert resp.json()['_data'] == [
{
'_base': {'name': 'Base location', 'type': 'city'},
'id': 1,
'name': 'City',
'population': 100
}
]
@pytest.mark.models(
'backends/mongo/report',
'backends/postgres/report',
)
def test_select_revision(model, app):
app.authmodel(model, ['search', 'getone', 'getall'])
ids = RowIds(_push_test_data(app, model))
id0 = ids[0]
resp = app.get(f'/{model}/{id0}')
revision = resp.json()['_revision']
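    # `:format/jsonl` returns newline-delimited JSON, one object per row;
    # with limit(1) the body is a single JSON line.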
resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')
assert json.loads(resp.content) == {
'_revision': revision
}
"step-3": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = 
app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef 
test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n assert ids(resp) == [1, 2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?count!=10&count!=42')\n assert ids(resp) == [2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = 
resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_with_select(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'count': 42}\n mocker.patch.object(context.get('config'), 'always_show_id', True)\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id'], 'count': 42}\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id']}\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = 
app.get(f'/{model}?notes.note=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.note.lower()=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.create_date=\"2019-03-14\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?notes.create_date>\"2019-04-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?notes.foo.bar=\"baz\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n resp = app.get(f'/{model}?notes.note.contains(\"bar\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3 = _push_test_data(app, model)\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\n<mask token>\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\[email protected]('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 
'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_in_list_of_scalars(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?any(\"eq\",operating_licenses.license_types,\"valid\",\"invalid\",\"expired\")'\n )\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(\n f'/{model}?any(\"ne\",operating_licenses.license_types,\"expired\")')\n assert sorted(ids(resp)) == [0]\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n 
app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_group(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"&status=\"OK\")')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n assert ids(resp) == [0, 2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lower_contains(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?report_type.lower().contains(\"st\")')\n assert ids(resp) == [0, 2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\[email protected]('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 'Lietuva'}, {'_op': 'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\[email protected]('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = 
create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',\n 'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',\n 'code': 'ee'}]})\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',\n None)]\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-4": "<mask token>\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [{**res, '_op': 'insert', '_type':\n model} for res in data or test_data]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = 
app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_gte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lte(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef 
test_search_le_with_nested_date(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n assert ids(resp) == [1, 2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?count!=10&count!=42')\n assert ids(resp) == [2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")'\n )\n data = 
resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_contains_with_select(model, context, app, mocker):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'count': 42}\n mocker.patch.object(context.get('config'), 'always_show_id', True)\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)'\n )\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id'], 'count': 42}\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {'_id': r2['_id']}\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_startswith(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")'\n )\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(\n f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 0\n resp = app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['InvalidValue']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = 
app.get(f'/{model}?notes.note=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.note.lower()=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(f'/{model}?notes.create_date=\"2019-03-14\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n resp = app.get(f'/{model}?notes.create_date>\"2019-04-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n resp = app.get(f'/{model}?notes.foo.bar=\"baz\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == ['FieldNotInResource']\n resp = app.get(f'/{model}?notes.note.contains(\"bar\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")'\n )\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3 = _push_test_data(app, model)\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n resp = app.get(\n f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\ndef ids(resources):\n if isinstance(resources, (requests.models.Response, httpx.Response)):\n resp = resources\n assert resp.status_code == 200, resp.json()\n resources = resp.json()['_data']\n return [r['_id'] for r in resources]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3 = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\n<mask token>\n\n\[email protected]('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_nested_recurse_multiple_props(model, context, app):\n r1, r2 = ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'fi'}, {'govid': '2',\n 'country': 'se'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'no'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country)=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country)=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country)=\"no\"')\n assert ids(resp) == [r2]\n\n\[email protected]('backends/mongo/recurse', 'backends/postgres/recurse')\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2 = 
ids(_push_test_data(app, model, [{'title': 'Org', 'country':\n 'fi', 'govids': [{'govid': '1', 'country': 'FI'}, {'govid': '2',\n 'country': 'SE'}]}, {'title': 'Org', 'country': 'no', 'govids': [{\n 'govid': '3', 'country': 'NO'}]}]))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_in_list_of_scalars(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(\n f'/{model}?any(\"eq\",operating_licenses.license_types,\"valid\",\"invalid\",\"expired\")'\n )\n assert sorted(ids(resp)) == [0, 1]\n resp = app.get(\n f'/{model}?any(\"ne\",operating_licenses.license_types,\"expired\")')\n assert sorted(ids(resp)) == [0]\n\n\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_contains(model, app):\n 
app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\n<mask token>\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_group(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"&status=\"OK\")')\n assert ids(resp) == [0]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n assert ids(resp) == [0, 2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_lower_contains(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?report_type.lower().contains(\"st\")')\n assert ids(resp) == [0, 2]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [{'status': 'OK'}, {}]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\[email protected]('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [{'_op': 'insert',\n 'code': 'lt', 'name': 'Lietuva'}, {'_op': 'insert', 'code': 'lv',\n 'name': 'Latvija'}, {'_op': 'insert', 'code': 'ee', 'name': 'Estija'}]}\n )\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n extrafields |\n | name | 
string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == ['Estija', 'Latvija', 'Lietuva']\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\[email protected]('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({'backends': [backend], 'manifests.default': {'type':\n 'tabular', 'path': str(tmp_path / 'manifest.csv'), 'backend': backend}}\n )\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [{'_op': 'insert',\n 'code': 'lt'}, {'_op': 'insert', 'code': 'lv'}, {'_op': 'insert',\n 'code': 'ee'}]})\n assert resp.status_code == 200, resp.json()\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable(\n \"\"\"\n m | property | type\n missingfields |\n | code | string\n | name | string\n \"\"\"\n ))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [('ee', None), ('lt', None), ('lv',\n None)]\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc,\n \"\"\"\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n \"\"\"\n , backend=postgresql, request=request)\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete',\n 'getall', 'search'])\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={'_id': _id, 'id':\n 1, 'name': 'Base location', 'type': 'city'})\n app.post('/datasets/gov/example/base/City', json={'_id': _id, 'name':\n 'City', 'population': 100})\n resp = app.get(\n '/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)'\n )\n assert resp.json()['_data'] == [{'_base': {'name': 'Base location',\n 'type': 'city'}, 'id': 1, 'name': 'City', 'population': 100}]\n\n\[email protected]('backends/mongo/report', 'backends/postgres/report')\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {'_revision': revision}\n",
"step-5": "import uuid\nimport json\n\nimport pytest\nimport requests\nimport httpx\nfrom spinta.testing.manifest import bootstrap_manifest\n\nfrom spinta.utils.data import take\nfrom spinta.testing.utils import error\nfrom spinta.testing.utils import get_error_codes, RowIds\nfrom spinta.testing.context import create_test_context\nfrom spinta.testing.client import create_test_client\nfrom spinta.manifests.tabular.helpers import striptable\nfrom spinta.testing.tabular import create_tabular_manifest\nfrom spinta.testing.data import listdata\n\n\ntest_data = [\n {\n '_type': 'report',\n 'status': 'OK',\n 'report_type': 'STV',\n 'count': 10,\n 'notes': [{\n 'note': 'hello',\n 'note_type': 'simple',\n 'create_date': '2019-03-14',\n }],\n 'operating_licenses': [{\n 'license_types': ['valid', 'invalid'],\n }],\n },\n {\n '_type': 'report',\n 'status': 'invalid',\n 'report_type': 'VMI',\n 'count': 42,\n 'notes': [{\n 'note': 'world',\n 'note_type': 'daily',\n 'create_date': '2019-04-20',\n }],\n 'operating_licenses': [{\n 'license_types': ['expired'],\n }],\n },\n {\n '_type': 'report',\n 'status': 'invalid',\n 'report_type': 'STV',\n 'count': 13,\n 'notes': [{\n 'note': 'foo bar',\n 'note_type': 'important',\n 'create_date': '2019-02-01',\n }],\n },\n]\n\n\ndef _push_test_data(app, model, data=None):\n app.authmodel(model, ['insert'])\n resp = app.post('/', json={'_data': [\n {\n **res,\n '_op': 'insert',\n '_type': model,\n }\n for res in data or test_data\n ]})\n assert resp.status_code == 200, resp.json()\n resp = resp.json()\n assert '_data' in resp, resp\n return resp['_data']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?status=\"OK\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_lower(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_non_string(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search, non string type\n resp = app.get(f'/{model}?count=13')\n data = resp.json()['_data']\n\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # single field fsearch, non string type\n resp = app.get(f'/{model}?count=\"abc\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # single non-existing field value search\n resp = app.get(f'/{model}?status=\"o\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n # single non-existing field search\n resp = app.get(f'/{model}?state=\"o\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"FieldNotInResource\"]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_multiple_props(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&report_type.lower()=\"stv\"')\n data = 
resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_exact_same_prop_multiple_times(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?status.lower()=\"invalid\"&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_gt(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count>40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status>\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `greater_than` works as expected\n resp = app.get(f'/{model}?count>42')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_gt_with_nested_date(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>\"2019-04-19\"')\n assert ids(resp) == [1]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_gte(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count>=40')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status>=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>=40&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count>=40&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `greater_than` works as expected\n resp = app.get(f'/{model}?count>=42')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ge_with_nested_date(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)>=\"2019-04-20\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lt(model, 
context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count<12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status<\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `lower_than` works as expected\n resp = app.get(f'/{model}?count<10')\n data = resp.json()['_data']\n assert len(data) == 0\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lt_with_nested_date(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<\"2019-02-02\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_lte(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?count<=12')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # search for string value\n resp = app.get(f'/{model}?status<=\"ok\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<=20&count>10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count<=50&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # test `lower_than` works as expected\n resp = app.get(f'/{model}?count<=10')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_le_with_nested_date(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date)<=\"2019-02-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n\n # single field search\n resp = app.get(f'/{model}?status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_lower(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # single field search, case insensitive\n resp = app.get(f'/{model}?status.lower()!=\"ok\"')\n 
assert ids(resp) == [1, 2]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_multiple_props(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?count!=10&count!=42')\n assert ids(resp) == [2]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_multiple_props_and_logic(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.lower()!=\"ok\"&report_type.lower()=\"stv\"')\n assert ids(resp) == [2]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_nested(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # test `ne` with nested structure\n resp = app.get(f'/{model}?notes.create_date!=\"2019-02-01\"&status!=\"invalid\"')\n assert ids(resp) == [0]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_ne_nested_missing_data(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n # test `ne` with nested structures and not full data in all resources\n resp = app.get(f'/{model}?operating_licenses.license_types!=\"valid\"')\n assert ids(resp) == [1]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_case_insensitive(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n # single field search, case insensitive\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_multi_field(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.contains(\"valid\")&report_type.lower().contains(\"tv\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.contains(\"valid\")&report_type.contains(\"TV\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # multi field search\n # test if operators are joined with AND logic for same field\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&report_type.lower().contains(\"mi\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = 
app.get(f'/{model}?status.contains(\"valid\")&report_type.lower()=\"vmi\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_type_check(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(create_date).contains(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_contains_with_select(model, context, app, mocker):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n\n # `contains` with select\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {\n 'count': 42,\n }\n\n # `contains` with select and always_show_id\n mocker.patch.object(context.get('config'), 'always_show_id', True)\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")&select(count)')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {\n '_id': r2['_id'],\n 'count': 42,\n }\n\n # `contains` with always_show_id should return just id\n resp = app.get(f'/{model}?report_type.lower().contains(\"vm\")')\n assert resp.status_code == 200\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0] == {\n '_id': r2['_id'],\n }\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_select_unknown_property(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_select_unknown_property_in_object(model, context, app, mocker):\n _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?select(notes.nothere)')\n assert error(resp) == 'FieldNotInResource'\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_startswith(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # single field search\n resp = app.get(f'/{model}?report_type.startswith(\"VM\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # single field search, case insensitive\n resp = app.get(f'/{model}?report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?status.startswith(\"in\")&report_type.lower().startswith(\"vm\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # multi field and multi operator search\n # test if operators are joined with AND logic\n resp = app.get(f'/{model}?report_type.lower().startswith(\"st\")&status.lower()=\"ok\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # sanity check that `startswith` searches from the start\n resp = app.get(f'/{model}?status.startswith(\"valid\")')\n data = resp.json()['_data']\n assert len(data) == 
0\n\n # `startswith` type check\n resp = app.get(f'/{model}?notes.create_date.startswith(\"2019-04-20\")')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"InvalidValue\"]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n\n app.authmodel(model, ['search'])\n\n # nested `exact` search\n resp = app.get(f'/{model}?notes.note=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # nested `exact` search, case insensitive\n resp = app.get(f'/{model}?notes.note.lower()=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n # nested `exact` search with dates\n resp = app.get(f'/{model}?notes.create_date=\"2019-03-14\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r1['_id']\n\n # nested `gt` search\n resp = app.get(f'/{model}?notes.create_date>\"2019-04-01\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n # nested non existant field\n resp = app.get(f'/{model}?notes.foo.bar=\"baz\"')\n assert resp.status_code == 400\n assert get_error_codes(resp.json()) == [\"FieldNotInResource\"]\n\n # nested `contains` search\n resp = app.get(f'/{model}?notes.note.contains(\"bar\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_contains(model, context, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?operating_licenses.license_types.contains(\"lid\")')\n assert ids(resp) == [0]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_startswith(model, context, app):\n app.authmodel(model, ['search'])\n r1, r2, r3, = _push_test_data(app, model)\n\n # nested `startswith` search\n resp = app.get(f'/{model}?notes.note.startswith(\"fo\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n resp = app.get(f'/{model}?operating_licenses.license_types.startswith(\"exp\")')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r2['_id']\n\n\ndef ids(resources):\n if isinstance(resources, (requests.models.Response, httpx.Response)):\n resp = resources\n assert resp.status_code == 200, resp.json()\n resources = resp.json()['_data']\n return [r['_id'] for r in resources]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_or(model, context, app):\n ids = RowIds(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?count=42|status.lower()=\"ok\"')\n assert ids(resp) == [0, 1]\n\n resp = app.get(f'/{model}?count<=10|count=13')\n assert ids(resp) == [0, 2]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_recurse(model, context, app):\n r1, r2, r3, = _push_test_data(app, model)\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(note)=\"foo bar\"')\n data = resp.json()['_data']\n assert len(data) == 1\n assert data[0]['_id'] == r3['_id']\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_nested_recurse_lower(model, context, app):\n r1, r2, r3, = 
ids(_push_test_data(app, model))\n app.authmodel(model, ['search'])\n resp = app.get(f'/{model}?recurse(status).lower()=\"ok\"')\n assert ids(resp) == [r1]\n\n\[email protected](\n 'backends/mongo/recurse',\n 'backends/postgres/recurse',\n)\ndef test_search_nested_recurse_multiple_props(model, context, app):\n r1, r2, = ids(_push_test_data(app, model, [\n {\n 'title': \"Org\",\n 'country': 'fi',\n 'govids': [\n {'govid': '1', 'country': 'fi'},\n {'govid': '2', 'country': 'se'},\n ]\n },\n {\n 'title': \"Org\",\n 'country': 'no',\n 'govids': [\n {'govid': '3', 'country': 'no'},\n ]\n },\n ]))\n app.authmodel(model, ['search'])\n\n resp = app.get(f'/{model}?recurse(country)=\"se\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country)=\"fi\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country)=\"no\"')\n assert ids(resp) == [r2]\n\n\[email protected](\n 'backends/mongo/recurse',\n 'backends/postgres/recurse',\n)\ndef test_search_recurse_multiple_props_lower(model, app):\n r1, r2, = ids(_push_test_data(app, model, [\n {\n 'title': \"Org\",\n 'country': 'fi',\n 'govids': [\n {'govid': '1', 'country': 'FI'},\n {'govid': '2', 'country': 'SE'},\n ]\n },\n {\n 'title': \"Org\",\n 'country': 'no',\n 'govids': [\n {'govid': '3', 'country': 'NO'},\n ]\n },\n ]))\n app.authmodel(model, ['search'])\n\n resp = app.get(f'/{model}?recurse(country).lower()=\"se\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country).lower()=\"fi\"')\n assert ids(resp) == [r1]\n\n resp = app.get(f'/{model}?recurse(country).lower()=\"no\"')\n assert ids(resp) == [r2]\n\n\n# TODO: add mongo\ndef test_search_any(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",count,10,42)')\n assert ids(resp) == [0, 1]\n\n resp = app.get(f'/{model}?any(\"ne\",count,42)')\n assert ids(resp) == [0, 2]\n\n\n# TODO: add mongo\ndef test_search_any_in_list(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",notes.note,\"hello\",\"world\")')\n assert sorted(ids(resp)) == [0, 1]\n\n resp = app.get(f'/{model}?any(\"ne\",notes.note,\"foo bar\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n# TODO: add mongo\ndef test_search_any_in_list_of_scalars(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",operating_licenses.license_types,\"valid\",\"invalid\",\"expired\")')\n assert sorted(ids(resp)) == [0, 1]\n\n resp = app.get(f'/{model}?any(\"ne\",operating_licenses.license_types,\"expired\")')\n assert sorted(ids(resp)) == [0]\n\n\n# TODO: add mongo\ndef test_search_any_recurse(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status),\"OK\",\"none\")')\n assert ids(resp) == [0]\n\n\n# TODO: add mongo\ndef test_search_any_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"eq\",recurse(status).lower(),\"ok\",\"none\")')\n assert ids(resp) == [0]\n\n\n# TODO: add mongo\ndef test_search_any_contains(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = 
app.get(f'/{model}?any(\"contains\",status,\"inv\",\"val\",\"lid\")')\n assert sorted(ids(resp)) == [1, 2]\n\n\n# TODO: add mongo\ndef test_search_any_contains_nested(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",notes.note,\"hel\",\"wor\")')\n assert sorted(ids(resp)) == [0, 1]\n\n\n# TODO: add mongo\ndef test_search_any_contains_recurse_lower(app):\n model = 'backends/postgres/report'\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?any(\"contains\",recurse(status).lower(),\"o\",\"k\")')\n assert sorted(ids(resp)) == [0]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.contains(\"{subid}\")')\n assert ids(resp) == [0]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_not_contains(model, app):\n app.authmodel(model, ['search', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_id.contains(\"AAAAA\")')\n assert ids(resp) == []\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][:5]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == [0]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_id_not_startswith(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n subid = ids[0][5:10]\n resp = app.get(f'/{model}?_id.startswith(\"{subid}\")')\n assert ids(resp) == []\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_revision_contains(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?_revision.contains(\"-\")')\n assert sorted(ids(resp)) == [0, 1, 2]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_revision_startswith(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision'][:5]\n resp = app.get(f'/{model}?_revision.startswith(\"{revision}\")')\n assert ids(resp) == [0]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_group(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"&status=\"OK\")')\n assert ids(resp) == [0]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_select_in_or(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?(report_type=\"STV\"|status=\"OK\")&select(_id)')\n # XXX: Flaky test, some times it gives [2, 0], don't know why.\n assert ids(resp) == [0, 2]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef 
test_search_lower_contains(model, app):\n app.authmodel(model, ['search', 'getone'])\n ids = RowIds(_push_test_data(app, model))\n resp = app.get(f'/{model}?report_type.lower().contains(\"st\")')\n # XXX: Flaky test, some times it gives [2, 0], don't know why.\n assert ids(resp) == [0, 2]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [\n {'status': 'OK'},\n {},\n ]))\n resp = app.get(f'/{model}?status=null')\n assert ids(resp) == [1]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_search_not_null(model, app):\n app.authmodel(model, ['search'])\n ids = RowIds(_push_test_data(app, model, [\n {'status': 'OK'},\n {},\n ]))\n resp = app.get(f'/{model}?status!=null')\n assert ids(resp) == [0]\n\n\[email protected]('backend', ['default', 'mongo'])\ndef test_extra_fields(postgresql, mongo, backend, rc, tmp_path, request):\n rc = rc.fork({\n 'backends': [backend],\n 'manifests.default': {\n 'type': 'tabular',\n 'path': str(tmp_path / 'manifest.csv'),\n 'backend': backend,\n },\n })\n\n # Create data into a extrafields model with code and name properties.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n extrafields |\n | code | string\n | name | string\n '''))\n context = create_test_context(rc)\n request.addfinalizer(context.wipe_all)\n app = create_test_client(context)\n app.authmodel('extrafields', ['insert'])\n resp = app.post('/extrafields', json={'_data': [\n {'_op': 'insert', 'code': 'lt', 'name': 'Lietuva'},\n {'_op': 'insert', 'code': 'lv', 'name': 'Latvija'},\n {'_op': 'insert', 'code': 'ee', 'name': 'Estija'},\n ]})\n assert resp.status_code == 200, resp.json()\n\n # Now try to read from same model, but loaded with just one property.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n extrafields |\n | name | string\n '''))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('extrafields', ['getall', 'getone'])\n resp = app.get('/extrafields')\n assert listdata(resp, sort=True) == [\n \"Estija\",\n \"Latvija\",\n \"Lietuva\",\n ]\n\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/extrafields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'name': 'Lietuva'}\n\n\[email protected]('backend', ['mongo'])\ndef test_missing_fields(postgresql, mongo, backend, rc, tmp_path):\n rc = rc.fork({\n 'backends': [backend],\n 'manifests.default': {\n 'type': 'tabular',\n 'path': str(tmp_path / 'manifest.csv'),\n 'backend': backend,\n },\n })\n\n # Create data into a extrafields model with code and name properties.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n missingfields |\n | code | string\n '''))\n context = create_test_context(rc)\n app = create_test_client(context)\n app.authmodel('missingfields', ['insert'])\n resp = app.post('/missingfields', json={'_data': [\n {'_op': 'insert', 'code': 'lt'},\n {'_op': 'insert', 'code': 'lv'},\n {'_op': 'insert', 'code': 'ee'},\n ]})\n assert resp.status_code == 200, resp.json()\n\n # Now try to read from same model, but loaded with just one property.\n create_tabular_manifest(tmp_path / 'manifest.csv', striptable('''\n m | property | type\n missingfields |\n | code | string\n | name | string\n '''))\n context = create_test_context(rc)\n app = 
create_test_client(context)\n app.authmodel('missingfields', ['search', 'getone'])\n resp = app.get('/missingfields?select(_id,code,name)')\n assert listdata(resp, sort=True) == [\n ('ee', None),\n ('lt', None),\n ('lv', None),\n ]\n\n pk = resp.json()['_data'][0]['_id']\n resp = app.get(f'/missingfields/{pk}')\n data = resp.json()\n assert resp.status_code == 200, data\n assert take(data) == {'code': 'lt'}\n\n\ndef test_base_select(rc, postgresql, request):\n context = bootstrap_manifest(rc, '''\n d | r | b | m | property | type | ref\n datasets/gov/example/base | |\n | |\n | | | Location | |\n | | | | id | integer |\n | | | | name | string |\n | | | | type | string |\n | |\n | | Location | |\n | | | City | |\n | | | | id | |\n | | | | name | string |\n | | | | population | integer |\n ''', backend=postgresql, request=request)\n\n app = create_test_client(context)\n app.authorize(['spinta_set_meta_fields'])\n app.authmodel('datasets/gov/example/base/Location', ['insert', 'delete'])\n app.authmodel('datasets/gov/example/base/City', ['insert', 'delete', 'getall', 'search'])\n\n _id = str(uuid.uuid4())\n app.post('/datasets/gov/example/base/Location', json={\n '_id': _id,\n 'id': 1,\n 'name': 'Base location',\n 'type': 'city'\n })\n app.post('/datasets/gov/example/base/City', json={\n '_id': _id,\n 'name': 'City',\n 'population': 100\n })\n\n resp = app.get('/datasets/gov/example/base/City?select(id,name,_base.name,population,_base.type)')\n assert resp.json()['_data'] == [\n {\n '_base': {'name': 'Base location', 'type': 'city'},\n 'id': 1,\n 'name': 'City',\n 'population': 100\n }\n ]\n\n\[email protected](\n 'backends/mongo/report',\n 'backends/postgres/report',\n)\ndef test_select_revision(model, app):\n app.authmodel(model, ['search', 'getone', 'getall'])\n ids = RowIds(_push_test_data(app, model))\n id0 = ids[0]\n resp = app.get(f'/{model}/{id0}')\n revision = resp.json()['_revision']\n resp = app.get(f'/{model}/:format/jsonl?limit(1)&select(_revision)')\n assert json.loads(resp.content) == {\n '_revision': revision\n }\n",
"step-ids": [
43,
48,
56,
58,
63
]
}
|
[
43,
48,
56,
58,
63
] |
include_rules = [
"+apps",
"+components/live_caption",
"+services/device/public",
"+components/device_reauth",
# Enable remote assistance on Chrome OS
"+remoting/host",
]
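# Per-file rules: keys are file-name regexes; matching files may additionally
# include the listed paths (mostly test-only dependencies).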
specific_include_rules = {
".*test.*": [
"+chrome/browser/ui/views/frame",
"+components/captive_portal",
"+components/web_package",
"+skia/public/mojom/bitmap.mojom.h",
],
"tls_socket_unittest\.cc": [
"+services/network/network_context.h",
],
"tcp_socket_unittest\.cc": [
"+services/network/network_context.h",
],
"udp_socket_unittest\.cc": [
"+services/network/network_context.h",
],
}
|
normal
|
{
"blob_id": "728af8b07bc391b496709e54926f3f1f49897176",
"index": 1992,
"step-1": "<mask token>\n",
"step-2": "include_rules = ['+apps', '+components/live_caption',\n '+services/device/public', '+components/device_reauth', '+remoting/host']\nspecific_include_rules = {'.*test.*': ['+chrome/browser/ui/views/frame',\n '+components/captive_portal', '+components/web_package',\n '+skia/public/mojom/bitmap.mojom.h'], 'tls_socket_unittest\\\\.cc': [\n '+services/network/network_context.h'], 'tcp_socket_unittest\\\\.cc': [\n '+services/network/network_context.h'], 'udp_socket_unittest\\\\.cc': [\n '+services/network/network_context.h']}\n",
"step-3": "include_rules = [\n \"+apps\",\n \"+components/live_caption\",\n \"+services/device/public\",\n \"+components/device_reauth\",\n # Enable remote assistance on Chrome OS\n \"+remoting/host\",\n]\n\nspecific_include_rules = {\n \".*test.*\": [\n \"+chrome/browser/ui/views/frame\",\n \"+components/captive_portal\",\n \"+components/web_package\",\n \"+skia/public/mojom/bitmap.mojom.h\",\n ],\n \"tls_socket_unittest\\.cc\": [\n \"+services/network/network_context.h\",\n ],\n \"tcp_socket_unittest\\.cc\": [\n \"+services/network/network_context.h\",\n ],\n \"udp_socket_unittest\\.cc\": [\n \"+services/network/network_context.h\",\n ],\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
'''
Created on Mar 19, 2019
@author: malte
'''
import gc
import pickle
from hyperopt import tpe, hp
from hyperopt.base import Trials
from hyperopt.fmin import fmin
from config.globals import BASE_PATH
from domain.features import FEATURES
from evaluate import evaluate
from featuregen.create_set import create_set
from helper.df_ops import train_test_split_idx
import lightgbm as lgbm
import numpy as np
import pandas as pd
#PATH
RAW = 'raw/'
SET = 'sample/'
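# Feature-set construction options handed to create_set() below.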
CONF = {
'train_only': False,
'pop_hidden': False,
'path_pop': BASE_PATH + SET,
'min_pop': None,
'price_hidden': False,
'path_price': BASE_PATH + SET,
'min_occurences': None,
'fillna_mean': False,
'path_session': BASE_PATH + SET,
'path_crawl': BASE_PATH + 'crawled/',
'path_poi': BASE_PATH + SET,
'path_meta': BASE_PATH + 'preprocessed/',
'meta_latent': 'd2v',
'path_latent': BASE_PATH + 'competition/',
}
#KEYS
DSKEY = 'dataset'
TRAILKEY = 'trails-lgbm'
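# hyperopt objective: builds the feature set, trains one LightGBM model with
# the sampled hyperparameters, and scores it with MRR on the held-out split.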
def objective( params ):
train = create_set( base_path=BASE_PATH + SET, conf=CONF, key=DSKEY, redo=False )
test = train.query('train == 0')
train.query('train == 1', inplace=True)
X = train[ FEATURES + ['session_id'] ]
y = train[ 'label' ]
del train
gc.collect()
X_train, X_valid = train_test_split_idx( X, y, test_size=0.1, shuffle=params['shuffle'] )
print( 'shuffled sample ',params['shuffle'] )
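    # pick the LightGBM objective: LambdaRank (listwise learning-to-rank,
    # evaluated with NDCG@30) or plain binary classification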
if params['ltr']:
params['application'] = 'lambdarank'
params['metric'] = 'ndcg'
params['eval_at'] = '30'
else:
params['application'] = 'binary'
params['metric'] = 'binary_logloss'
if params['ltr']:
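        # lambdarank needs query group sizes: the number of candidate rows per session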
q_train = X.loc[X_train].groupby( ['session_id'] ).size().values.astype(np.float32)
q_valid = X.loc[X_valid].groupby( ['session_id'] ).size().values.astype(np.float32)
xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)
ytrain = y.loc[X_train].values.astype(np.float32)
del X_train
gc.collect()
d_train = lgbm.Dataset( xtrain, label=ytrain, group=q_train, feature_name=FEATURES)#, categorical_feature=CAT_FEATURES )
del q_train, xtrain, ytrain
gc.collect()
xval = X.loc[X_valid][FEATURES].values.astype(np.float32)
yval = y.loc[X_valid].values.astype(np.float32)
del X_valid
gc.collect()
d_valid = lgbm.Dataset( xval, label=yval, group=q_valid, feature_name=FEATURES)#, categorical_feature=CAT_FEATURES )
del q_valid, xval, yval
gc.collect()
else:
xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)
ytrain = y.loc[X_train].values.astype(np.float32)
d_train = lgbm.Dataset( xtrain, label=ytrain, feature_name=FEATURES )#+ ['session_id'])#, categorical_feature=CAT_FEATURES )
        del xtrain, ytrain, X_train
gc.collect()
        xval = X.loc[X_valid][FEATURES].values.astype(np.float32)
        yval = y.loc[X_valid].values.astype(np.float32)
d_valid = lgbm.Dataset( xval, label=yval, feature_name=FEATURES )#+ ['session_id'])#, categorical_feature=CAT_FEATURES )
del xval, yval, X_valid
gc.collect()
watchlist = [d_train, d_valid]
evals_result = {}
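    # train with early stopping on the validation set; the patience itself is a tuned hyperparameter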
model = lgbm.train( params, train_set=d_train, num_boost_round=10000, valid_sets=watchlist, early_stopping_rounds=int(params['early_stopping']), evals_result=evals_result, verbose_eval=10 )
X_test = test[ FEATURES ]
y_test = model.predict(X_test, num_iteration=model.best_iteration )
test['prob'] = y_test
test = test.sort_values(['session_id','prob'], ascending=False)
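    # build the submission frame: impressions per session, ordered by predicted probability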
solution = pd.DataFrame()
solution['recommendations'] = test.groupby( 'session_id' ).impressions.apply( list )
solution['confidences'] = test.groupby( 'session_id' ).prob.apply( list )
    solution = solution.reset_index()
solution = solution.merge( test[['session_id', 'user_id', 'timestamp', 'step']].drop_duplicates(keep='last'), on='session_id', how='inner' )
#solution.to_csv( BASE_PATH + '/' + SET + '/solution_' + ALGKEY + '.csv' )
result = evaluate( solution, base=BASE_PATH, dataset=SET )
print( result.T )
del solution,test,X_test,y_test,d_train, d_valid, watchlist
gc.collect()
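    # fmin minimizes, so return the negated MRR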
return -1 * result['mrr@A'].values[0]
def main():
space = {
'ltr': hp.choice('ltr', [True]),
'shuffle': hp.choice('shuffle', [False]),
'num_leaves': hp.choice('num_leaves', list(np.arange(8, 256, 2, dtype=int) )),
'max_depth': hp.choice('max_depth', list(np.arange(4, 64, 2, dtype=int) )),
'max_bin': hp.choice('max_bin', list(np.arange(255, 255*4, 5, dtype=int) )),
'min_data_in_leaf': hp.choice('min_data_in_leaf', list(np.arange(5, 100, 5, dtype=int) )),
'learning_rate': hp.uniform('learning_rate', 0.01, 0.3),
'bagging_fraction': hp.uniform('bagging_fraction', 0.2, 1.0),
'feature_fraction': hp.uniform('feature_fraction', 0.2, 1.0),
        'early_stopping': hp.uniform('early_stopping', 100, 1000),
}
trials_step = 1 # how many additional trials to do after loading saved trials. 1 = save after iteration
max_trials = 1 # initial max_trials. put something small to not have to wait
    try:  # try to load an already saved trials object, and increase the max
        with open(BASE_PATH + SET + TRAILKEY + '.hyperopt', "rb") as f:
            trials = pickle.load(f)
        print("Found saved Trials! Loading...")
        max_trials = len(trials.trials) + trials_step
        print("Rerunning from {} trials to {} (+{}) trials".format(len(trials.trials), max_trials, trials_step))
    except Exception:  # no saved trials yet: create a new trials object and start searching
        trials = Trials()
best = fmin(fn=objective,
space=space,
algo=tpe.suggest,
trials=trials,
max_evals=max_trials)
print("Best:", best)
print("Num:", max_trials)
# save the trials object
with open(BASE_PATH + SET + TRAILKEY + ".hyperopt", "wb") as f:
pickle.dump(trials, f)
if __name__ == '__main__':
while True:
main()
|
normal
|
{
"blob_id": "daf070291bbf59a7a06b129bbde5fd79b5cd46ad",
"index": 6715,
"step-1": "<mask token>\n\n\ndef objective(params):\n train = create_set(base_path=BASE_PATH + SET, conf=CONF, key=DSKEY,\n redo=False)\n test = train.query('train == 0')\n train.query('train == 1', inplace=True)\n X = train[FEATURES + ['session_id']]\n y = train['label']\n del train\n gc.collect()\n X_train, X_valid = train_test_split_idx(X, y, test_size=0.1, shuffle=\n params['shuffle'])\n print('shuffled sample ', params['shuffle'])\n if params['ltr']:\n params['application'] = 'lambdarank'\n params['metric'] = 'ndcg'\n params['eval_at'] = '30'\n else:\n params['application'] = 'binary'\n params['metric'] = 'binary_logloss'\n if params['ltr']:\n q_train = X.loc[X_train].groupby(['session_id']).size().values.astype(\n np.float32)\n q_valid = X.loc[X_valid].groupby(['session_id']).size().values.astype(\n np.float32)\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n del X_train\n gc.collect()\n d_train = lgbm.Dataset(xtrain, label=ytrain, group=q_train,\n feature_name=FEATURES)\n del q_train, xtrain, ytrain\n gc.collect()\n xval = X.loc[X_valid][FEATURES].values.astype(np.float32)\n yval = y.loc[X_valid].values.astype(np.float32)\n del X_valid\n gc.collect()\n d_valid = lgbm.Dataset(xval, label=yval, group=q_valid,\n feature_name=FEATURES)\n del q_valid, xval, yval\n gc.collect()\n else:\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n d_train = lgbm.Dataset(xtrain, label=ytrain, feature_name=FEATURES)\n del xtrain, xtrain, X_train\n gc.collect()\n xval = X[X_valid][FEATURES].values.astype(np.float32)\n yval = y[X_valid].values.astype(np.float32)\n d_valid = lgbm.Dataset(xval, label=yval, feature_name=FEATURES)\n del xval, yval, X_valid\n gc.collect()\n watchlist = [d_train, d_valid]\n evals_result = {}\n model = lgbm.train(params, train_set=d_train, num_boost_round=10000,\n valid_sets=watchlist, early_stopping_rounds=int(params[\n 'early_stopping']), evals_result=evals_result, verbose_eval=10)\n X_test = test[FEATURES]\n y_test = model.predict(X_test, num_iteration=model.best_iteration)\n test['prob'] = y_test\n test = test.sort_values(['session_id', 'prob'], ascending=False)\n solution = pd.DataFrame()\n solution['recommendations'] = test.groupby('session_id').impressions.apply(\n list)\n solution['confidences'] = test.groupby('session_id').prob.apply(list)\n solution.reset_index(drop=True)\n solution = solution.merge(test[['session_id', 'user_id', 'timestamp',\n 'step']].drop_duplicates(keep='last'), on='session_id', how='inner')\n result = evaluate(solution, base=BASE_PATH, dataset=SET)\n print(result.T)\n del solution, test, X_test, y_test, d_train, d_valid, watchlist\n gc.collect()\n return -1 * result['mrr@A'].values[0]\n\n\ndef main():\n space = {'ltr': hp.choice('ltr', [True]), 'shuffle': hp.choice(\n 'shuffle', [False]), 'num_leaves': hp.choice('num_leaves', list(np.\n arange(8, 256, 2, dtype=int))), 'max_depth': hp.choice('max_depth',\n list(np.arange(4, 64, 2, dtype=int))), 'max_bin': hp.choice(\n 'max_bin', list(np.arange(255, 255 * 4, 5, dtype=int))),\n 'min_data_in_leaf': hp.choice('min_data_in_leaf', list(np.arange(5,\n 100, 5, dtype=int))), 'learning_rate': hp.uniform('learning_rate', \n 0.01, 0.3), 'bagging_fraction': hp.uniform('bagging_fraction', 0.2,\n 1.0), 'feature_fraction': hp.uniform('feature_fraction', 0.2, 1.0),\n 'early_stopping': hp.uniform('test_size', 100, 1000)}\n trials_step = 1\n max_trials = 1\n try:\n trials = 
pickle.load(open(BASE_PATH + SET + TRAILKEY + '.hyperopt',\n 'rb'))\n print('Found saved Trials! Loading...')\n max_trials = len(trials.trials) + trials_step\n print('Rerunning from {} trials to {} (+{}) trials'.format(len(\n trials.trials), max_trials, trials_step))\n except:\n trials = Trials()\n best = fmin(fn=objective, space=space, algo=tpe.suggest, trials=trials,\n max_evals=max_trials)\n print('Best:', best)\n print('Num:', max_trials)\n with open(BASE_PATH + SET + TRAILKEY + '.hyperopt', 'wb') as f:\n pickle.dump(trials, f)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef objective(params):\n train = create_set(base_path=BASE_PATH + SET, conf=CONF, key=DSKEY,\n redo=False)\n test = train.query('train == 0')\n train.query('train == 1', inplace=True)\n X = train[FEATURES + ['session_id']]\n y = train['label']\n del train\n gc.collect()\n X_train, X_valid = train_test_split_idx(X, y, test_size=0.1, shuffle=\n params['shuffle'])\n print('shuffled sample ', params['shuffle'])\n if params['ltr']:\n params['application'] = 'lambdarank'\n params['metric'] = 'ndcg'\n params['eval_at'] = '30'\n else:\n params['application'] = 'binary'\n params['metric'] = 'binary_logloss'\n if params['ltr']:\n q_train = X.loc[X_train].groupby(['session_id']).size().values.astype(\n np.float32)\n q_valid = X.loc[X_valid].groupby(['session_id']).size().values.astype(\n np.float32)\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n del X_train\n gc.collect()\n d_train = lgbm.Dataset(xtrain, label=ytrain, group=q_train,\n feature_name=FEATURES)\n del q_train, xtrain, ytrain\n gc.collect()\n xval = X.loc[X_valid][FEATURES].values.astype(np.float32)\n yval = y.loc[X_valid].values.astype(np.float32)\n del X_valid\n gc.collect()\n d_valid = lgbm.Dataset(xval, label=yval, group=q_valid,\n feature_name=FEATURES)\n del q_valid, xval, yval\n gc.collect()\n else:\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n d_train = lgbm.Dataset(xtrain, label=ytrain, feature_name=FEATURES)\n del xtrain, xtrain, X_train\n gc.collect()\n xval = X[X_valid][FEATURES].values.astype(np.float32)\n yval = y[X_valid].values.astype(np.float32)\n d_valid = lgbm.Dataset(xval, label=yval, feature_name=FEATURES)\n del xval, yval, X_valid\n gc.collect()\n watchlist = [d_train, d_valid]\n evals_result = {}\n model = lgbm.train(params, train_set=d_train, num_boost_round=10000,\n valid_sets=watchlist, early_stopping_rounds=int(params[\n 'early_stopping']), evals_result=evals_result, verbose_eval=10)\n X_test = test[FEATURES]\n y_test = model.predict(X_test, num_iteration=model.best_iteration)\n test['prob'] = y_test\n test = test.sort_values(['session_id', 'prob'], ascending=False)\n solution = pd.DataFrame()\n solution['recommendations'] = test.groupby('session_id').impressions.apply(\n list)\n solution['confidences'] = test.groupby('session_id').prob.apply(list)\n solution.reset_index(drop=True)\n solution = solution.merge(test[['session_id', 'user_id', 'timestamp',\n 'step']].drop_duplicates(keep='last'), on='session_id', how='inner')\n result = evaluate(solution, base=BASE_PATH, dataset=SET)\n print(result.T)\n del solution, test, X_test, y_test, d_train, d_valid, watchlist\n gc.collect()\n return -1 * result['mrr@A'].values[0]\n\n\ndef main():\n space = {'ltr': hp.choice('ltr', [True]), 'shuffle': hp.choice(\n 'shuffle', [False]), 'num_leaves': hp.choice('num_leaves', list(np.\n arange(8, 256, 2, dtype=int))), 'max_depth': hp.choice('max_depth',\n list(np.arange(4, 64, 2, dtype=int))), 'max_bin': hp.choice(\n 'max_bin', list(np.arange(255, 255 * 4, 5, dtype=int))),\n 'min_data_in_leaf': hp.choice('min_data_in_leaf', list(np.arange(5,\n 100, 5, dtype=int))), 'learning_rate': hp.uniform('learning_rate', \n 0.01, 0.3), 'bagging_fraction': hp.uniform('bagging_fraction', 0.2,\n 1.0), 'feature_fraction': hp.uniform('feature_fraction', 0.2, 1.0),\n 'early_stopping': hp.uniform('test_size', 100, 1000)}\n trials_step = 1\n max_trials = 1\n try:\n trials = 
pickle.load(open(BASE_PATH + SET + TRAILKEY + '.hyperopt',\n 'rb'))\n print('Found saved Trials! Loading...')\n max_trials = len(trials.trials) + trials_step\n print('Rerunning from {} trials to {} (+{}) trials'.format(len(\n trials.trials), max_trials, trials_step))\n except:\n trials = Trials()\n best = fmin(fn=objective, space=space, algo=tpe.suggest, trials=trials,\n max_evals=max_trials)\n print('Best:', best)\n print('Num:', max_trials)\n with open(BASE_PATH + SET + TRAILKEY + '.hyperopt', 'wb') as f:\n pickle.dump(trials, f)\n\n\nif __name__ == '__main__':\n while True:\n main()\n",
"step-3": "<mask token>\nRAW = 'raw/'\nSET = 'sample/'\nCONF = {'train_only': False, 'pop_hidden': False, 'path_pop': BASE_PATH +\n SET, 'min_pop': None, 'price_hidden': False, 'path_price': BASE_PATH +\n SET, 'min_occurences': None, 'fillna_mean': False, 'path_session': \n BASE_PATH + SET, 'path_crawl': BASE_PATH + 'crawled/', 'path_poi': \n BASE_PATH + SET, 'path_meta': BASE_PATH + 'preprocessed/',\n 'meta_latent': 'd2v', 'path_latent': BASE_PATH + 'competition/'}\nDSKEY = 'dataset'\nTRAILKEY = 'trails-lgbm'\n\n\ndef objective(params):\n train = create_set(base_path=BASE_PATH + SET, conf=CONF, key=DSKEY,\n redo=False)\n test = train.query('train == 0')\n train.query('train == 1', inplace=True)\n X = train[FEATURES + ['session_id']]\n y = train['label']\n del train\n gc.collect()\n X_train, X_valid = train_test_split_idx(X, y, test_size=0.1, shuffle=\n params['shuffle'])\n print('shuffled sample ', params['shuffle'])\n if params['ltr']:\n params['application'] = 'lambdarank'\n params['metric'] = 'ndcg'\n params['eval_at'] = '30'\n else:\n params['application'] = 'binary'\n params['metric'] = 'binary_logloss'\n if params['ltr']:\n q_train = X.loc[X_train].groupby(['session_id']).size().values.astype(\n np.float32)\n q_valid = X.loc[X_valid].groupby(['session_id']).size().values.astype(\n np.float32)\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n del X_train\n gc.collect()\n d_train = lgbm.Dataset(xtrain, label=ytrain, group=q_train,\n feature_name=FEATURES)\n del q_train, xtrain, ytrain\n gc.collect()\n xval = X.loc[X_valid][FEATURES].values.astype(np.float32)\n yval = y.loc[X_valid].values.astype(np.float32)\n del X_valid\n gc.collect()\n d_valid = lgbm.Dataset(xval, label=yval, group=q_valid,\n feature_name=FEATURES)\n del q_valid, xval, yval\n gc.collect()\n else:\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n d_train = lgbm.Dataset(xtrain, label=ytrain, feature_name=FEATURES)\n del xtrain, xtrain, X_train\n gc.collect()\n xval = X[X_valid][FEATURES].values.astype(np.float32)\n yval = y[X_valid].values.astype(np.float32)\n d_valid = lgbm.Dataset(xval, label=yval, feature_name=FEATURES)\n del xval, yval, X_valid\n gc.collect()\n watchlist = [d_train, d_valid]\n evals_result = {}\n model = lgbm.train(params, train_set=d_train, num_boost_round=10000,\n valid_sets=watchlist, early_stopping_rounds=int(params[\n 'early_stopping']), evals_result=evals_result, verbose_eval=10)\n X_test = test[FEATURES]\n y_test = model.predict(X_test, num_iteration=model.best_iteration)\n test['prob'] = y_test\n test = test.sort_values(['session_id', 'prob'], ascending=False)\n solution = pd.DataFrame()\n solution['recommendations'] = test.groupby('session_id').impressions.apply(\n list)\n solution['confidences'] = test.groupby('session_id').prob.apply(list)\n solution.reset_index(drop=True)\n solution = solution.merge(test[['session_id', 'user_id', 'timestamp',\n 'step']].drop_duplicates(keep='last'), on='session_id', how='inner')\n result = evaluate(solution, base=BASE_PATH, dataset=SET)\n print(result.T)\n del solution, test, X_test, y_test, d_train, d_valid, watchlist\n gc.collect()\n return -1 * result['mrr@A'].values[0]\n\n\ndef main():\n space = {'ltr': hp.choice('ltr', [True]), 'shuffle': hp.choice(\n 'shuffle', [False]), 'num_leaves': hp.choice('num_leaves', list(np.\n arange(8, 256, 2, dtype=int))), 'max_depth': hp.choice('max_depth',\n list(np.arange(4, 64, 2, 
dtype=int))), 'max_bin': hp.choice(\n 'max_bin', list(np.arange(255, 255 * 4, 5, dtype=int))),\n 'min_data_in_leaf': hp.choice('min_data_in_leaf', list(np.arange(5,\n 100, 5, dtype=int))), 'learning_rate': hp.uniform('learning_rate', \n 0.01, 0.3), 'bagging_fraction': hp.uniform('bagging_fraction', 0.2,\n 1.0), 'feature_fraction': hp.uniform('feature_fraction', 0.2, 1.0),\n 'early_stopping': hp.uniform('test_size', 100, 1000)}\n trials_step = 1\n max_trials = 1\n try:\n trials = pickle.load(open(BASE_PATH + SET + TRAILKEY + '.hyperopt',\n 'rb'))\n print('Found saved Trials! Loading...')\n max_trials = len(trials.trials) + trials_step\n print('Rerunning from {} trials to {} (+{}) trials'.format(len(\n trials.trials), max_trials, trials_step))\n except:\n trials = Trials()\n best = fmin(fn=objective, space=space, algo=tpe.suggest, trials=trials,\n max_evals=max_trials)\n print('Best:', best)\n print('Num:', max_trials)\n with open(BASE_PATH + SET + TRAILKEY + '.hyperopt', 'wb') as f:\n pickle.dump(trials, f)\n\n\nif __name__ == '__main__':\n while True:\n main()\n",
"step-4": "<mask token>\nimport gc\nimport pickle\nfrom hyperopt import tpe, hp\nfrom hyperopt.base import Trials\nfrom hyperopt.fmin import fmin\nfrom config.globals import BASE_PATH\nfrom domain.features import FEATURES\nfrom evaluate import evaluate\nfrom featuregen.create_set import create_set\nfrom helper.df_ops import train_test_split_idx\nimport lightgbm as lgbm\nimport numpy as np\nimport pandas as pd\nRAW = 'raw/'\nSET = 'sample/'\nCONF = {'train_only': False, 'pop_hidden': False, 'path_pop': BASE_PATH +\n SET, 'min_pop': None, 'price_hidden': False, 'path_price': BASE_PATH +\n SET, 'min_occurences': None, 'fillna_mean': False, 'path_session': \n BASE_PATH + SET, 'path_crawl': BASE_PATH + 'crawled/', 'path_poi': \n BASE_PATH + SET, 'path_meta': BASE_PATH + 'preprocessed/',\n 'meta_latent': 'd2v', 'path_latent': BASE_PATH + 'competition/'}\nDSKEY = 'dataset'\nTRAILKEY = 'trails-lgbm'\n\n\ndef objective(params):\n train = create_set(base_path=BASE_PATH + SET, conf=CONF, key=DSKEY,\n redo=False)\n test = train.query('train == 0')\n train.query('train == 1', inplace=True)\n X = train[FEATURES + ['session_id']]\n y = train['label']\n del train\n gc.collect()\n X_train, X_valid = train_test_split_idx(X, y, test_size=0.1, shuffle=\n params['shuffle'])\n print('shuffled sample ', params['shuffle'])\n if params['ltr']:\n params['application'] = 'lambdarank'\n params['metric'] = 'ndcg'\n params['eval_at'] = '30'\n else:\n params['application'] = 'binary'\n params['metric'] = 'binary_logloss'\n if params['ltr']:\n q_train = X.loc[X_train].groupby(['session_id']).size().values.astype(\n np.float32)\n q_valid = X.loc[X_valid].groupby(['session_id']).size().values.astype(\n np.float32)\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n del X_train\n gc.collect()\n d_train = lgbm.Dataset(xtrain, label=ytrain, group=q_train,\n feature_name=FEATURES)\n del q_train, xtrain, ytrain\n gc.collect()\n xval = X.loc[X_valid][FEATURES].values.astype(np.float32)\n yval = y.loc[X_valid].values.astype(np.float32)\n del X_valid\n gc.collect()\n d_valid = lgbm.Dataset(xval, label=yval, group=q_valid,\n feature_name=FEATURES)\n del q_valid, xval, yval\n gc.collect()\n else:\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n d_train = lgbm.Dataset(xtrain, label=ytrain, feature_name=FEATURES)\n del xtrain, xtrain, X_train\n gc.collect()\n xval = X[X_valid][FEATURES].values.astype(np.float32)\n yval = y[X_valid].values.astype(np.float32)\n d_valid = lgbm.Dataset(xval, label=yval, feature_name=FEATURES)\n del xval, yval, X_valid\n gc.collect()\n watchlist = [d_train, d_valid]\n evals_result = {}\n model = lgbm.train(params, train_set=d_train, num_boost_round=10000,\n valid_sets=watchlist, early_stopping_rounds=int(params[\n 'early_stopping']), evals_result=evals_result, verbose_eval=10)\n X_test = test[FEATURES]\n y_test = model.predict(X_test, num_iteration=model.best_iteration)\n test['prob'] = y_test\n test = test.sort_values(['session_id', 'prob'], ascending=False)\n solution = pd.DataFrame()\n solution['recommendations'] = test.groupby('session_id').impressions.apply(\n list)\n solution['confidences'] = test.groupby('session_id').prob.apply(list)\n solution.reset_index(drop=True)\n solution = solution.merge(test[['session_id', 'user_id', 'timestamp',\n 'step']].drop_duplicates(keep='last'), on='session_id', how='inner')\n result = evaluate(solution, base=BASE_PATH, dataset=SET)\n 
print(result.T)\n del solution, test, X_test, y_test, d_train, d_valid, watchlist\n gc.collect()\n return -1 * result['mrr@A'].values[0]\n\n\ndef main():\n space = {'ltr': hp.choice('ltr', [True]), 'shuffle': hp.choice(\n 'shuffle', [False]), 'num_leaves': hp.choice('num_leaves', list(np.\n arange(8, 256, 2, dtype=int))), 'max_depth': hp.choice('max_depth',\n list(np.arange(4, 64, 2, dtype=int))), 'max_bin': hp.choice(\n 'max_bin', list(np.arange(255, 255 * 4, 5, dtype=int))),\n 'min_data_in_leaf': hp.choice('min_data_in_leaf', list(np.arange(5,\n 100, 5, dtype=int))), 'learning_rate': hp.uniform('learning_rate', \n 0.01, 0.3), 'bagging_fraction': hp.uniform('bagging_fraction', 0.2,\n 1.0), 'feature_fraction': hp.uniform('feature_fraction', 0.2, 1.0),\n 'early_stopping': hp.uniform('test_size', 100, 1000)}\n trials_step = 1\n max_trials = 1\n try:\n trials = pickle.load(open(BASE_PATH + SET + TRAILKEY + '.hyperopt',\n 'rb'))\n print('Found saved Trials! Loading...')\n max_trials = len(trials.trials) + trials_step\n print('Rerunning from {} trials to {} (+{}) trials'.format(len(\n trials.trials), max_trials, trials_step))\n except:\n trials = Trials()\n best = fmin(fn=objective, space=space, algo=tpe.suggest, trials=trials,\n max_evals=max_trials)\n print('Best:', best)\n print('Num:', max_trials)\n with open(BASE_PATH + SET + TRAILKEY + '.hyperopt', 'wb') as f:\n pickle.dump(trials, f)\n\n\nif __name__ == '__main__':\n while True:\n main()\n",
"step-5": "'''\nCreated on Mar 19, 2019\n\n@author: malte\n'''\n\nimport gc\nimport pickle\n\nfrom hyperopt import tpe, hp\nfrom hyperopt.base import Trials\nfrom hyperopt.fmin import fmin\n\nfrom config.globals import BASE_PATH\nfrom domain.features import FEATURES\nfrom evaluate import evaluate\nfrom featuregen.create_set import create_set\nfrom helper.df_ops import train_test_split_idx\nimport lightgbm as lgbm\nimport numpy as np\nimport pandas as pd\n\n#PATH\nRAW = 'raw/'\nSET = 'sample/'\n\nCONF = {\n 'train_only': False,\n \n 'pop_hidden': False,\n 'path_pop': BASE_PATH + SET,\n 'min_pop': None,\n \n 'price_hidden': False,\n 'path_price': BASE_PATH + SET,\n 'min_occurences': None,\n 'fillna_mean': False,\n \n 'path_session': BASE_PATH + SET,\n \n 'path_crawl': BASE_PATH + 'crawled/',\n 'path_poi': BASE_PATH + SET,\n \n 'path_meta': BASE_PATH + 'preprocessed/',\n 'meta_latent': 'd2v',\n \n 'path_latent': BASE_PATH + 'competition/',\n}\n\n#KEYS\nDSKEY = 'dataset'\nTRAILKEY = 'trails-lgbm'\n\ndef objective( params ):\n \n train = create_set( base_path=BASE_PATH + SET, conf=CONF, key=DSKEY, redo=False )\n \n test = train.query('train == 0')\n train.query('train == 1', inplace=True)\n \n X = train[ FEATURES + ['session_id'] ]\n y = train[ 'label' ]\n \n del train\n gc.collect()\n \n X_train, X_valid = train_test_split_idx( X, y, test_size=0.1, shuffle=params['shuffle'] )\n print( 'shuffled sample ',params['shuffle'] )\n \n if params['ltr']:\n params['application'] = 'lambdarank'\n params['metric'] = 'ndcg'\n params['eval_at'] = '30'\n else:\n params['application'] = 'binary'\n params['metric'] = 'binary_logloss'\n \n if params['ltr']:\n q_train = X.loc[X_train].groupby( ['session_id'] ).size().values.astype(np.float32)\n q_valid = X.loc[X_valid].groupby( ['session_id'] ).size().values.astype(np.float32)\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n del X_train\n gc.collect()\n d_train = lgbm.Dataset( xtrain, label=ytrain, group=q_train, feature_name=FEATURES)#, categorical_feature=CAT_FEATURES )\n del q_train, xtrain, ytrain\n gc.collect()\n xval = X.loc[X_valid][FEATURES].values.astype(np.float32)\n yval = y.loc[X_valid].values.astype(np.float32)\n del X_valid\n gc.collect()\n d_valid = lgbm.Dataset( xval, label=yval, group=q_valid, feature_name=FEATURES)#, categorical_feature=CAT_FEATURES )\n del q_valid, xval, yval\n gc.collect()\n else:\n xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)\n ytrain = y.loc[X_train].values.astype(np.float32)\n d_train = lgbm.Dataset( xtrain, label=ytrain, feature_name=FEATURES )#+ ['session_id'])#, categorical_feature=CAT_FEATURES )\n del xtrain, xtrain, X_train\n gc.collect()\n \n xval = X[X_valid][FEATURES].values.astype(np.float32)\n yval = y[X_valid].values.astype(np.float32)\n d_valid = lgbm.Dataset( xval, label=yval, feature_name=FEATURES )#+ ['session_id'])#, categorical_feature=CAT_FEATURES )\n del xval, yval, X_valid\n gc.collect()\n watchlist = [d_train, d_valid]\n \n evals_result = {}\n model = lgbm.train( params, train_set=d_train, num_boost_round=10000, valid_sets=watchlist, early_stopping_rounds=int(params['early_stopping']), evals_result=evals_result, verbose_eval=10 )\n \n X_test = test[ FEATURES ]\n y_test = model.predict(X_test, num_iteration=model.best_iteration )\n \n test['prob'] = y_test\n test = test.sort_values(['session_id','prob'], ascending=False)\n solution = pd.DataFrame()\n solution['recommendations'] = test.groupby( 'session_id' 
).impressions.apply( list )\n solution['confidences'] = test.groupby( 'session_id' ).prob.apply( list )\n solution.reset_index(drop=True)\n solution = solution.merge( test[['session_id', 'user_id', 'timestamp', 'step']].drop_duplicates(keep='last'), on='session_id', how='inner' ) \n #solution.to_csv( BASE_PATH + '/' + SET + '/solution_' + ALGKEY + '.csv' )\n \n result = evaluate( solution, base=BASE_PATH, dataset=SET )\n print( result.T )\n \n del solution,test,X_test,y_test,d_train, d_valid, watchlist\n gc.collect()\n \n return -1 * result['mrr@A'].values[0]\n\n\ndef main():\n \n space = {\n 'ltr': hp.choice('ltr', [True]),\n 'shuffle': hp.choice('shuffle', [False]),\n 'num_leaves': hp.choice('num_leaves', list(np.arange(8, 256, 2, dtype=int) )),\n 'max_depth': hp.choice('max_depth', list(np.arange(4, 64, 2, dtype=int) )),\n 'max_bin': hp.choice('max_bin', list(np.arange(255, 255*4, 5, dtype=int) )),\n 'min_data_in_leaf': hp.choice('min_data_in_leaf', list(np.arange(5, 100, 5, dtype=int) )),\n 'learning_rate': hp.uniform('learning_rate', 0.01, 0.3),\n 'bagging_fraction': hp.uniform('bagging_fraction', 0.2, 1.0),\n 'feature_fraction': hp.uniform('feature_fraction', 0.2, 1.0),\n 'early_stopping': hp.uniform('test_size', 100, 1000),\n }\n\n trials_step = 1 # how many additional trials to do after loading saved trials. 1 = save after iteration\n max_trials = 1 # initial max_trials. put something small to not have to wait\n\n \n try: # try to load an already saved trials object, and increase the max\n trials = pickle.load(open( BASE_PATH + SET + TRAILKEY + '.hyperopt', \"rb\"))\n print(\"Found saved Trials! Loading...\")\n max_trials = len(trials.trials) + trials_step\n print(\"Rerunning from {} trials to {} (+{}) trials\".format(len(trials.trials), max_trials, trials_step))\n except: # create a new trials object and start searching\n trials = Trials()\n\n best = fmin(fn=objective,\n space=space,\n algo=tpe.suggest,\n trials=trials,\n max_evals=max_trials)\n \n print(\"Best:\", best)\n print(\"Num:\", max_trials)\n \n # save the trials object\n with open(BASE_PATH + SET + TRAILKEY + \".hyperopt\", \"wb\") as f:\n pickle.dump(trials, f)\n\n\nif __name__ == '__main__':\n while True:\n main()\n ",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
class Car:
    __name = ""
    __maxspeed = 0
    def __init__(self):
        self.__updateSoftware()
        self.__name = "Supercar"
        self.__maxspeed = 320
    def drive(self):
        print("Driving")
        print("name of the car " + self.__name)
    def __updateSoftware(self):
        print("Updating Software")
    def sayHello(self, name=None):
        if name is None:
            print("Hello")
        else:
            print("Hello " + name)
redcar = Car()
redcar.sayHello()
redcar.sayHello("Venky")
redcar.drive()
print(redcar._Car__maxspeed)
#redcar._Car__updateSoftware()
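#note: the lookup above succeeds because Python rewrites identifiers of the form
#__attr inside a class body to _ClassName__attr ("name mangling"); the "private"
#attribute is hidden by convention, not by real access control.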
|
normal
|
{
"blob_id": "318556a6c327294986fcef938c254b8dfe66adaa",
"index": 6375,
"step-1": "class Car:\n <mask token>\n <mask token>\n\n def __init__(self):\n self.__updateSoftware()\n self.__name = 'Supercar'\n self.__maxspeed = 320\n\n def drive(self):\n print('Driving')\n print('name of the car ' + self.__name)\n\n def __updateSoftware(self):\n print('Updating Software')\n\n def sayHello(self, name=None):\n if name is None:\n print('Hello')\n else:\n print('Hello' + name)\n\n\n<mask token>\n",
"step-2": "class Car:\n __name = ''\n __maxspeed = 0\n\n def __init__(self):\n self.__updateSoftware()\n self.__name = 'Supercar'\n self.__maxspeed = 320\n\n def drive(self):\n print('Driving')\n print('name of the car ' + self.__name)\n\n def __updateSoftware(self):\n print('Updating Software')\n\n def sayHello(self, name=None):\n if name is None:\n print('Hello')\n else:\n print('Hello' + name)\n\n\n<mask token>\n",
"step-3": "class Car:\n __name = ''\n __maxspeed = 0\n\n def __init__(self):\n self.__updateSoftware()\n self.__name = 'Supercar'\n self.__maxspeed = 320\n\n def drive(self):\n print('Driving')\n print('name of the car ' + self.__name)\n\n def __updateSoftware(self):\n print('Updating Software')\n\n def sayHello(self, name=None):\n if name is None:\n print('Hello')\n else:\n print('Hello' + name)\n\n\n<mask token>\nredcar.sayHello()\nredcar.sayHello('Venky')\nredcar.drive()\nprint(redcar._Car__maxspeed)\n",
"step-4": "class Car:\n __name = ''\n __maxspeed = 0\n\n def __init__(self):\n self.__updateSoftware()\n self.__name = 'Supercar'\n self.__maxspeed = 320\n\n def drive(self):\n print('Driving')\n print('name of the car ' + self.__name)\n\n def __updateSoftware(self):\n print('Updating Software')\n\n def sayHello(self, name=None):\n if name is None:\n print('Hello')\n else:\n print('Hello' + name)\n\n\nredcar = Car()\nredcar.sayHello()\nredcar.sayHello('Venky')\nredcar.drive()\nprint(redcar._Car__maxspeed)\n",
"step-5": "class Car:\n __name=\"\"\n __maxspeed = 0\n\n\n def __init__(self):\n self.__updateSoftware()\n self.__name = \"Supercar\"\n self.__maxspeed=320\n\n\n def drive(self):\n print(\"Driving\")\n print(\"name of the car \" + self.__name)\n\n\n def __updateSoftware(self):\n print(\"Updating Software\")\n\n def sayHello(self,name=None):\n if name is None:\n print (\"Hello\")\n else:\n print(\"Hello\" + name)\n\n\nredcar = Car()\nredcar.sayHello()\nredcar.sayHello(\"Venky\")\nredcar.drive()\nprint(redcar._Car__maxspeed)\n#redcar._Car__updateSoftware()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
import pygame
import os
from time import sleep
pygame.init()  #initialise pygame before creating the display surface
screen = pygame.display.set_mode((900,700))
screen.fill((255,255,255))
pygame.display.set_caption("NTUFOODIERECOMMENDSYSTEM")
'''
###########################
──╔╗────╔╗
──║║───╔╝╚╗
╔═╝╠╦══╬╗╔╬╦══╦═╗╔══╦═╦╗─╔╗
║╔╗╠╣╔═╝║║╠╣╔╗║╔╗╣╔╗║╔╣║─║║
║╚╝║║╚═╗║╚╣║╚╝║║║║╔╗║║║╚═╝║
╚══╩╩══╝╚═╩╩══╩╝╚╩╝╚╩╝╚═╗╔╝
──────────────────────╔═╝║
──────────────────────╚══╝
###########################
● Database is stored on site.
● Updating is relatively simple.
● The programme runs on pygame, so the database is hard to update without text input.
● However, updates can easily be made from the shell/console.
'''
# Each food court entry is stored as [Highest Cost, Lowest Cost, Cuisines Available, Closing Time, Food Preferences Available, Coordinates on NTU Map];
# the dictionary maps each canteen name (key) to the list of properties (value) allotted to it.
canteen_list = {
"Food Court 1": [12, 3.5, ["Korean", "Japanese", "Western"], 2100, ["Halal", "Non-Halal/Non-Vegetarian"], (442, 473)],
"Food Court 2": [10, 3.6, ["Korean", "Chinese", "Malay", ], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (477, 409)],
"Food Court 4": [10, 3, ["Chinese", "Western"], 2100, ["Non-Halal/Non-Vegetarian"], (358,526)],
"Food Court 9": [10, 3.5, ["Chinese"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (582, 288)],
"Food Court 11": [10, 2.5, ["Chinese", "Indian", "Japanese", "Western"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (682, 243)],
"Food Court 13": [9, 2, ["Western", "Korean", "Japanese", "Chinese"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (445, 176)],
"Food Court 14": [8, 3, ["Western", "Chinese", "Korean", "Malay"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (509, 182)],
"Food Court 16": [10, 3.3, ["Japanese", "Chinese", "Korean", "Indian"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (405, 221)],
"Tamarind Food Court": [10, 3, ["Malay", "Chinese", "Korean", "Western"], 2100, ["Halal", "Non-Halal", "Vegetarian","Non-Halal/Non-Vegetarian"], (627, 200)],
"Pioneer Food Court": [20, 2.3, ["Thai", "Chinese"], 0000, ["Vegetarian", "Non-Halal/Non-Vegetarian"], (497, 561)],
"North Spine Food Court": [10, 2.5, ["Korean", "Japanese", "Chinese", "Western", "Malay"], 2100, ["Vegetarian", "Non-Halal/Non-Vegetarian"], (275, 293)],
"North Spine Plaza": [10, 4, ["Western", "Korean"], 2130, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (287, 339)],
"South Spine Food Court": [10, 2, ["Chinese", "Malay", "Korean", "Japanese", "Western"], 2100, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (227, 496)],
"Quad Cafe": [10, 2.4, ["Korean", "Chinese", "Indian", "Malay"], 2100, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (224, 351)],
"Coffee Bean": [20, 4, ["Western"], 2000, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (219, 389)],
"North Hill Food Court": [10, 3.8, ["Chinese", "Malay", "Indian"], 2100, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (720,314)]
}
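# e.g. canteen_list["Food Court 1"] decodes as: highest cost $12, lowest cost $3.50,
# Korean/Japanese/Western cuisine, closes at 2100, serves Halal and non-Halal/non-vegetarian,
# and sits at pixel (442, 473) on the NTU map image.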
'''
###########################################
───╔╗───────────╔═╗─────╔╗─────╔╗─╔╗
───║║───────────║╔╝─────║║────╔╝╚╦╝╚╗
╔══╣║╔══╦══╦══╗╔╝╚╦══╦═╗║╚═╦╗╔╬╗╔╩╗╔╬══╦═╗
║╔═╣║║╔╗║══╣══╣╚╗╔╣╔╗║╔╝║╔╗║║║║║║─║║║╔╗║╔╗╗
║╚═╣╚╣╔╗╠══╠══║─║║║╚╝║║─║╚╝║╚╝║║╚╗║╚╣╚╝║║║║
╚══╩═╩╝╚╩══╩══╝─╚╝╚══╩╝─╚══╩══╝╚═╝╚═╩══╩╝╚╝
###########################################
● We had help from online tutorials to work out the UI button functionality.
● A few corresponding tweaks from the tutorial were incorporated into the project.
● ref: https://www.youtube.com/watch?v=4_9twnEduFA
'''
class button():
def __init__(self, colour, x, y, width, height, text=''):
self.colour = colour
self.x = x
self.y = y
self.width = width
self.height = height
self.text = text
def draw(self,win,outline = None):
if outline:
#draw a bigger rectangle behind to create a border
pygame.draw.rect(win, outline, (self.x-2, self.y-2, self.width+4, self.height+4),0)
#draws the button rectangle
pygame.draw.rect(win, self.colour, (self.x, self.y, self.width, self.height),0)
if self.text != '':
font = pygame.font.SysFont('calligrapher.ttf', 60)
text = font.render(self.text, 1, (0,0,0))
win.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))
    def isOver(self, pos):
        #pos is the mouse position (x,y) coordinates
        if pos[0] > self.x and pos[0] < self.x + self.width:
            if pos[1] > self.y and pos[1] < self.y + self.height:
                return True
        return False
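# Minimal usage sketch (hypothetical names; assumes a display surface `win` exists):
#   ok = button((255,255,255), 10, 10, 120, 40, 'OK')
#   ok.draw(win, (0,0,0))                      # white button with a black outline
#   if ok.isOver(pygame.mouse.get_pos()): ...  # hover / click hit-test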
'''
##################################
─╔═╗─────────╔╗
─║╔╝────────╔╝╚╗
╔╝╚╦╗╔╦═╗╔══╬╗╔╬╦══╦═╗╔══╗
╚╗╔╣║║║╔╗╣╔═╝║║╠╣╔╗║╔╗╣══╣
─║║║╚╝║║║║╚═╗║╚╣║╚╝║║║╠══║
─╚╝╚══╩╝╚╩══╝╚═╩╩══╩╝╚╩══╝
##################################
╔═╗────────╔╗
║═╬═╦╦╗╔═╦╦╬╣
║╔╣╬║╔╝║╬║║║║
╚╝╚═╩╝─╠╗╠═╩╝
───────╚═╝
#################
● Most of the functions here draw the different states the screen can be in
● The redraw functions update the display for their respective transitory states
'''
#these 3 functions control how text is rendered onto the surface
def text(text,win,x,y):
font = pygame.font.SysFont('freesansbold.ttf', 50)
phrase = font.render(text, 1, (0,0,0))
win.blit(phrase, (x,y))
def instructionText(text,win,x,y):
font = pygame.font.SysFont('Arial', 20)
phrase = font.render(text, 1, (0,0,0))
win.blit(phrase, (x,y))
def header(text,win,x,y):
font = pygame.font.SysFont('TimesNewRoman', 70)
phrase = font.render(text, 1, (0,0,0))
win.blit(phrase, (x,y))
def mouseClick(screen):
    #checks for a mouse click and fetches the corresponding position
x,y = pygame.mouse.get_pos()
if (x >= 65 and x <=727) and (y >=82 and y <= 618):
#print(event.button)
pygame.draw.circle(screen, (255,0,150), (x,y), 15)
return True, x, y
else:
print("Out of bounds!")
return False, x, y
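# note: the hard-coded bounds (65..727, 82..618) appear to delimit the usable campus
# area of NTUMap.jpg; clicks outside that rectangle are rejected as "Out of bounds!"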
def skeleExit(win):
    #exit animation: plays image frames 1.png to 41.png
aryadelight = pygame.image.load(os.path.join("NTUFoodieRecsv1.png"))
win.blit(aryadelight,(0,0))
pygame.display.update()
xaxis = 100
for i in range(1,42):
image = str(i) + ".png"
skele = pygame.image.load(os.path.join(image))
win.blit(skele, (250,200))
text("Exiting...", win, (xaxis+20), 600)
pygame.display.update()
sleep(0.09)
def loading(win):
    #loading screen; sleeps 0.3 seconds between consecutive frames
    #cycles the four frames load0.png..load3.png three times
    for _ in range(3):
        for i in range(4):
            frame = pygame.image.load(os.path.join("load%d.png" % i))
            win.blit(frame, (0,0))
            pygame.display.update()
            sleep(0.3)
# ---------------------------------------------------------------------------#
def redrawMap(screen):
#draws the embedded NTU map image provided
NTUmap = pygame.image.load(os.path.join("NTUMap.jpg"))
screen.blit(NTUmap, (0,0))
for x in range(50,900,50):
#y axial grids
pygame.draw.rect(screen, (255,0,0), (x, 0, 1, 700), 0)
for y in range(50,700,50):
#x axial grids
pygame.draw.rect(screen, (255,0,0), (0, y, 900, 1), 0)
text('Please click on your current location!',screen,200,100)
def redrawGPSMap(screen, top3, x, y):
#redraw NTU map, but this time with corresponding location coordinates
NTUmap = pygame.image.load(os.path.join("NTUMap.jpg"))
screen.blit(NTUmap, (0,0))
redGPS = pygame.image.load(os.path.join("redgps.png"))
screen.blit(redGPS, (x-16,y-32))
instructionText("You are currently at this position.", screen, x+4, y-10)
    counter = 1
    for i in top3:
        coor = canteen_list[i][5]
        if counter == 1:
            blueGPS = pygame.image.load(os.path.join("bluegps.png"))
            screen.blit(blueGPS, (coor[0]-12,coor[1]-24))
            instructionText(i, screen, coor[0]-24, coor[1])
        if counter == 2:
            blackGPS = pygame.image.load(os.path.join("blackgps.png"))
            screen.blit(blackGPS, (coor[0]-12,coor[1]-24))
            instructionText(i, screen, coor[0]-24, coor[1])
        if counter == 3:
            yellowGPS = pygame.image.load(os.path.join("yellowgps.png"))
            screen.blit(yellowGPS, (coor[0]-12,coor[1]-24))
            instructionText(i, screen, coor[0]-24, coor[1])
        counter += 1
restartButton.draw(screen, (0,0,0))
def redrawMainWin(screen):
#functionality that controls what is displayed on the main window
aryadelight = pygame.image.load(os.path.join("NTUFoodieRecsv1.png"))
screen.blit(aryadelight,(0,0))
mapButton.draw(screen, (0,0,0))
instructionText("(Choose your cuisines, preferences and budget for the meal here!)",screen,215,320)
predictButton.draw(screen, (0,0,0))
instructionText("(Find the nearest canteen!)",screen,132,470)
exitButton.draw(screen, (0,0,0))
ice = pygame.image.load(os.path.join("ice.png"))
screen.blit(ice, (500,670))
font = pygame.font.SysFont('verdana', 20)
creator = font.render("Made by HweeHean X Arya", 1, (0,0,200))
screen.blit(creator, (535,670))
def redrawCustWin(screen):
#controls what is displayed on the customisation window
bp = pygame.image.load(os.path.join("gradient.jpg"))
screen.blit(bp,(0,0))
instructionText('Left click again to reset!',screen,300,20)
text('Please select your food preference: ', screen, 100, 50)
halalButton.draw(screen, (0,0,0))
vegButton.draw(screen, (0,0,0))
nonhalalButton.draw(screen, (0,0,0))
text('Please select your cuisine type: ', screen, 100, 200)
koreanButton.draw(screen, (0,0,0))
malayButton.draw(screen, (0,0,0))
japanButton.draw(screen, (0,0,0))
chineseButton.draw(screen, (0,0,0))
indianButton.draw(screen, (0,0,0))
westernButton.draw(screen, (0,0,0))
text('Please select your maximum budget: ', screen, 100, 430)
button3.draw(screen, (0,0,0))
button5.draw(screen, (0,0,0))
button7.draw(screen, (0,0,0))
button9.draw(screen, (0,0,0))
nextButton.draw(screen, (0,0,0))
#maps each canteen to its photo so the long if-chains below collapse to a single lookup
canteen_images = {
    "Food Court 1": "Canteen1.jpg", "Food Court 2": "Canteen2.png",
    "Food Court 4": "Canteen4.png", "Food Court 9": "Canteen9.png",
    "Food Court 11": "Canteen11.png", "Food Court 13": "Canteen13.png",
    "Food Court 14": "Canteen14.png", "Food Court 16": "Canteen16.png",
    "Tamarind Food Court": "Tamarind.jpg", "Pioneer Food Court": "Pioneer.png",
    "North Spine Food Court": "NorthSpine.jpg", "North Spine Plaza": "NorthSpinePlaza.jpg",
    "South Spine Food Court": "SouthSpineKoufuFoodCourt.png", "Quad Cafe": "Quad.jpg",
    "Coffee Bean": "Coffee.jpg", "North Hill Food Court": "NorthHill.jpg"
}
def redrawSearchWin(screen,x,y):
    #gives the top 3 most relevant results for the prediction tab
    bp = pygame.image.load(os.path.join("NTUFoodieRecsv1.png"))
    screen.blit(bp,(0,0))
    GordonRamsay = pygame.image.load(os.path.join("GordonRamsay.png"))
    screen.blit(GordonRamsay, (400,100))
    distList = []
    for i in canteen_list:
        distList.append(i)
    print(distList)
    top3 = nearest_can(distList, x, y)
    print(top3)
    text("Nearest Canteen:",screen,110,400)
    yaxis = 490
    canteenCount = 1
    for k in top3:
        if canteenCount == 1:
            #only the nearest canteen's photo is displayed
            canteenPic = pygame.image.load(os.path.join(canteen_images[k]))
            screen.blit(canteenPic, (150,200))
        text(str(canteenCount), screen, 110, yaxis)
        text(".", screen, 135, yaxis)
        text(k,screen,150,yaxis)
        canteenCount += 1
        yaxis += 70
    return top3
def complicatedSearchWin(screen,top3):
    #displays the top3 results for the end user after clicking customisation
    bp = pygame.image.load(os.path.join("NTUFoodieRecsv1.png"))
    screen.blit(bp,(0,0))
    GordonRamsay = pygame.image.load(os.path.join("GordonRamsay.png"))
    screen.blit(GordonRamsay, (400,100))
    text("Nearest Canteen:",screen,110,400)
    yaxis = 490
    canteenCount = 1
    for k in top3:
        if canteenCount == 1:
            #only the nearest canteen's photo is displayed
            canteenPic = pygame.image.load(os.path.join(canteen_images[k]))
            screen.blit(canteenPic, (150,200))
        text(str(canteenCount), screen, 110, yaxis)
        text(".", screen, 135, yaxis)
        text(k,screen,150,yaxis)
        canteenCount += 1
        yaxis += 70
'''
╔═╗────╔═╗───╔╗╔╗
║═╬═╦╦╗║═╬═╦╦╣╚╬╬═╦╦═╗
║╔╣╬║╔╝╠═║╬║╔╣╔╣║║║║╬║
╚╝╚═╩╝─╚═╩═╩╝╚═╩╩╩═╬╗║
───────────────────╚═╝
###########################
● Functions below control how we do the sorting for the distance
and the different cuisines
'''
#function provided by ARYA
#function to compile a list of all the relevant food courts
def final_list(user_budget, user_cuisine, user_preference):
new_list = []
#Creating a list of all food courts that fit in the user's budget
for i in canteen_list:
if user_budget >= canteen_list[i][1]:
new_list.append(i)
#Creating a list of all food courts according to the imposed constraints on cuisine
for c in user_cuisine:
for i in canteen_list:
if c in canteen_list[i][2]:
new_list.append(i)
#Adding to the list, all the food courts according to the food preferences specified
for c in user_preference:
for i in canteen_list:
if c in canteen_list[i][4]:
new_list.append(i)
#eliminating all the repeated options
new_list = list(set(new_list))
#if new_list is empty due to no selection made
if len(new_list) == 0:
for i in canteen_list:
new_list.append(i)
return(new_list)
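# note: selection is a union, not an intersection. For example, final_list(2, ["Thai"], [])
# keeps the two courts whose lowest cost is <= $2 (Food Court 13, South Spine Food Court)
# and also Pioneer Food Court, which misses the budget cut but serves Thai cuisine.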
#function to calculate the straight-line distance from you to a proposed option
def calc_dis(x1, y1, x2, y2):
    return ((x1-x2)**2 + (y1-y2)**2)**0.5
#function to find out the nearest suitable food outlet/food court
def nearest_can(new_list, x, y):
top3 = []
copy_list = new_list.copy()
    while len(top3) != 3 and copy_list: #stop early if fewer than 3 candidates exist
j = copy_list[0]
coor = canteen_list[j][5]
Min = calc_dis(x, y, coor[0], coor[1])
food_court = ''
for k in copy_list:
#coordinates of the food court
coor = canteen_list[k][5]
dist = calc_dis(x, y, coor[0], coor[1])
if Min >= dist:
Min = dist
food_court = k
index = copy_list.index(food_court)
copy_list.pop(index)
top3.append(food_court)
print(top3)
return top3
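# e.g. nearest_can(["Food Court 1", "Food Court 2", "Quad Cafe", "Coffee Bean"], 300, 350)
# ranks those four courts by distance from map pixel (300, 350) and returns
# ["Quad Cafe", "Coffee Bean", "Food Court 2"].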
'''
#########################
╔╗─────╔╗─╔╗
║║────╔╝╚╦╝╚╗
║╚═╦╗╔╬╗╔╩╗╔╬══╦═╗╔══╗
║╔╗║║║║║║─║║║╔╗║╔╗╣══╣
║╚╝║╚╝║║╚╗║╚╣╚╝║║║╠══║
╚══╩══╝╚═╝╚═╩══╩╝╚╩══╝
#########################
● This is where the buttons are defined. Using the class...
● They are relatively self-explanatory
'''
#buttons for the main loading page:
mapButton = button((255,255,255), 200, 250, 500, 100, 'Canteen Customisation')
predictButton = button((255,255,255), 100, 400, 300, 100, 'Prediction')
exitButton = button((255,255,255), 500, 400, 300, 100, 'Exit')
#buttons for the customisation screen:
halalButton = button((255,255,255), 50, 120, 250, 50, 'Halal')
vegButton = button((255,255,255), 320, 120, 250, 50, 'Vegetarian')
nonhalalButton = button((255,255,255), 590, 120, 250, 50, 'Non-Halal')
koreanButton = button((255,255,255), 50, 270, 250, 50, 'Korean')
malayButton = button((255,255,255), 320, 270, 250, 50, 'Malay')
japanButton = button((255,255,255), 590, 270, 250, 50, 'Japanese')
chineseButton = button((255,255,255), 50, 340, 250, 50, 'Chinese')
indianButton = button((255,255,255), 320, 340, 250, 50, 'Indian')
westernButton = button((255,255,255), 590, 340, 250, 50, 'Western')
button3 = button((255,255,255), 235, 490, 70, 50, '$3')
button5 = button((255,255,255), 355, 490, 70, 50, '$5')
button7 = button((255,255,255), 475, 490, 70, 50, '$7')
button9 = button((255,255,255), 595, 490, 70, 50, '$10')
nextButton = button((255,255,255), 730, 580, 120, 70, 'Next')
#buttons to showcase GPS:
gpsButton = button((255,255,255), 700, 600, 170, 50, 'to Map')
restartButton = button((255,255,255), 700, 600, 190, 50, 'Restart?')
'''
#############################
────╔╗────╔╗
───╔╝╚╗──╔╝╚╗
╔══╬╗╔╬══╬╗╔╬══╦══╗
║══╣║║║╔╗║║║║║═╣══╣
╠══║║╚╣╔╗║║╚╣║═╬══║
╚══╝╚═╩╝╚╝╚═╩══╩══╝
#############################
● Since I'm only using one while loop and all the functions are in here,
it is important to note that none of the "if" statements interfere with
each other
● Acts like a flip-flop which stores the data of the different STATES
'''
#original state of customisation buttons
halalButtonPressed = False
vegButtonPressed = False
nonhalalButtonPressed = False
koreanButtonPressed = False
malayButtonPressed = False
japanButtonPressed = False
chineseButtonPressed = False
indianButtonPressed = False
westernButtonPressed = False
button3Pressed = False
button5Pressed = False
button7Pressed = False
button9Pressed = False
nextButtonPressed = False
gpsButtonPressed = False
#original state of events
checkButton = True
mapCoor = False
customisationMenu = False
mapCoor2 = False
easySearch = False
complicatedMenu = False
oneTime = True
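#rough state flow (derived from the loop below): checkButton -> mapCoor -> customisationMenu
#-> nextButtonPressed -> complicatedMenu -> gpsButtonPressed, or checkButton -> mapCoor2
#-> easySearch -> gpsButtonPressed; restartButton resets everything to the defaults above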
'''
####################################
╔═╗╔═╗───────╔═══╗
║║╚╝║║───────║╔═╗║
║╔╗╔╗╠══╦╦═╗─║╚═╝╠═╦══╦══╦═╦══╦╗╔╗
║║║║║║╔╗╠╣╔╗╗║╔══╣╔╣╔╗║╔╗║╔╣╔╗║╚╝║
║║║║║║╔╗║║║║║║║──║║║╚╝║╚╝║║║╔╗║║║║
╚╝╚╝╚╩╝╚╩╩╝╚╝╚╝──╚╝╚══╩═╗╠╝╚╝╚╩╩╩╝
──────────────────────╔═╝║
──────────────────────╚══╝
####################################
● It relies on a lot of predefined states being turned on and off, so multiple
  things can be displayed without interfering with each other's functionality
● I.e. clicking the customisation button disables it, so a second click
  over the same area will not activate it again.
● This is very important for a smooth flow.
● Some debugging messages are left in the console to help
  show what is going on behind the scenes
'''
pygame.init()
run = True
clock = pygame.time.Clock()
#start the pygame programme
while run:
#if true, redraws the main window
if checkButton:
redrawMainWin(screen)
#if true, redraws the customisation window
if customisationMenu:
redrawCustWin(screen)
if easySearch:
if oneTime:
nearest_canteen = redrawSearchWin(screen, x, y)
sleep(2)
oneTime = False
gpsButton.draw(screen, (0,0,0))
    #if true, redraws the complicated customisation results
if complicatedMenu:
if oneTime:
complicatedSearchWin(screen, nearest_canteen)
sleep(2)
oneTime = False
gpsButton.draw(screen, (0,0,0))
    #redraws the GPS map, with point locators indicated
if gpsButtonPressed == True:
redrawGPSMap(screen, nearest_canteen, x, y)
pygame.display.update()
clock.tick(30)
#checks event
for event in pygame.event.get():
#Fetches the mouse position
pos = pygame.mouse.get_pos()
#Quits the pygame programme
        if event.type == pygame.QUIT:
            run = False
            pygame.quit()
            break #skip any remaining events once pygame has shut down
if gpsButtonPressed:
if event.type == pygame.MOUSEBUTTONDOWN:
if restartButton.isOver(pos):
restartButton.colour = (50,50,50)
restartButton.draw(screen, (0,0,0))
pygame.display.update()
print('clicked the restart button')
#original state of customisation buttons
halalButtonPressed = False
vegButtonPressed = False
nonhalalButtonPressed = False
koreanButtonPressed = False
malayButtonPressed = False
japanButtonPressed = False
chineseButtonPressed = False
indianButtonPressed = False
westernButtonPressed = False
button3Pressed = False
button5Pressed = False
button7Pressed = False
button9Pressed = False
nextButtonPressed = False
gpsButtonPressed = False
#original state of events
checkButton = True
mapCoor = False
customisationMenu = False
mapCoor2 = False
easySearch = False
complicatedMenu = False
oneTime = True
if event.type == pygame.MOUSEMOTION:
if restartButton.isOver(pos):
restartButton.colour = (0,255,0)
continue
else:
restartButton.colour = (255,255,255)
continue
if easySearch == True or complicatedMenu == True:
if event.type == pygame.MOUSEBUTTONDOWN:
if gpsButton.isOver(pos):
gpsButton.colour = (50,50,50)
gpsButton.draw(screen, (0,0,0))
pygame.display.update()
print('clicked gps button')
gpsButtonPressed = True
easySearch = False
complicatedMenu = False
continue
if event.type == pygame.MOUSEMOTION:
if gpsButton.isOver(pos):
gpsButton.colour = (0,255,0)
continue
else:
gpsButton.colour = (255,255,255)
continue
#if mouse is clicked over buttons (main page)
if checkButton:
if event.type == pygame.MOUSEBUTTONDOWN:
if mapButton.isOver(pos):
mapButton.colour = (0,255,0)
redrawMainWin(screen)
pygame.display.update()
print('clicked map button')
sleep(0.5)
redrawMap(screen)
checkButton = False
mapCoor = True
continue
if predictButton.isOver(pos):
predictButton.colour = (0,255,0)
redrawMainWin(screen)
pygame.display.update()
print('clicked predict button')
sleep(0.5)
redrawMap(screen)
checkButton = False
mapCoor2 = True
continue
if exitButton.isOver(pos):
exitButton.colour = (0,255,0)
print('Exiting...')
skeleExit(screen)
pygame.quit()
run = False
exit()
#if mouse hovered over the button (main page)
if event.type == pygame.MOUSEMOTION:
if mapButton.isOver(pos):
mapButton.colour = (255,0,0)
else:
mapButton.colour = (255,255,255)
if predictButton.isOver(pos):
predictButton.colour = (255,0,0)
else:
predictButton.colour = (255,255,255)
if exitButton.isOver(pos):
exitButton.colour = (255,0,0)
else:
exitButton.colour = (255,255,255)
#clicking buttons in the customisation menu:
if customisationMenu:
if event.type == pygame.MOUSEMOTION:
if nextButton.isOver(pos):
nextButton.colour = (0,0,255)
else:
nextButton.colour = (255,255,255)
continue
if event.type == pygame.MOUSEBUTTONDOWN:
#clicking on next button
if nextButton.isOver(pos):
nextButton.colour = (255,255,0)
nextButtonPressed = True
customisationMenu = False
continue
if halalButton.isOver(pos):
if halalButtonPressed == False:
if nonhalalButtonPressed:
nonhalalButton.colour = (255,255,255)
nonhalalButtonPressed = False
halalButton.colour = (0,255,0)
print('clicked Halal button')
halalButtonPressed = True
continue
else:
halalButton.colour = (255,255,255)
halalButtonPressed = False
continue
if vegButton.isOver(pos):
if vegButtonPressed == False:
if nonhalalButtonPressed:
nonhalalButton.colour = (255,255,255)
nonhalalButtonPressed = False
vegButton.colour = (0,255,0)
print('clicked Vegetarian button')
vegButtonPressed = True
continue
else:
vegButton.colour = (255,255,255)
vegButtonPressed = False
continue
if nonhalalButton.isOver(pos):
if nonhalalButtonPressed == False:
if halalButtonPressed:
halalButton.colour = (255,255,255)
halalButtonPressed = False
if vegButtonPressed:
vegButton.colour = (255,255,255)
vegButtonPressed = False
nonhalalButton.colour = (0,255,0)
print('clicked non-halal button')
nonhalalButtonPressed = True
continue
else:
nonhalalButton.colour = (255,255,255)
nonhalalButtonPressed = False
if koreanButton.isOver(pos):
if koreanButtonPressed == False:
koreanButton.colour = (0,255,0)
print('clicked korean button')
koreanButtonPressed = True
continue
else:
koreanButton.colour = (255,255,255)
koreanButtonPressed = False
if malayButton.isOver(pos):
if malayButtonPressed == False:
malayButton.colour = (0,255,0)
print('clicked Malay button')
malayButtonPressed = True
continue
else:
malayButton.colour = (255,255,255)
malayButtonPressed = False
if japanButton.isOver(pos):
if japanButtonPressed == False:
japanButton.colour = (0,255,0)
print('clicked japan button')
japanButtonPressed = True
continue
else:
japanButton.colour = (255,255,255)
japanButtonPressed = False
if chineseButton.isOver(pos):
if chineseButtonPressed == False:
chineseButton.colour = (0,255,0)
print('clicked chinese button')
chineseButtonPressed = True
continue
else:
chineseButton.colour = (255,255,255)
chineseButtonPressed = False
if indianButton.isOver(pos):
if indianButtonPressed == False:
indianButton.colour = (0,255,0)
print('clicked indian button')
indianButtonPressed = True
continue
else:
indianButton.colour = (255,255,255)
indianButtonPressed = False
if westernButton.isOver(pos):
if westernButtonPressed == False:
westernButton.colour = (0,255,0)
print('clicked western button')
westernButtonPressed = True
continue
else:
westernButton.colour = (255,255,255)
westernButtonPressed = False
if button3.isOver(pos):
if button3Pressed == False:
if button5Pressed == True:
button5.colour = (255,255,255)
button5Pressed = False
if button7Pressed == True:
button7.colour = (255,255,255)
button7Pressed = False
if button9Pressed == True:
button9.colour = (255,255,255)
button9Pressed = False
button3.colour = (0,255,0)
print('clicked $3')
button3Pressed = True
continue
else:
button3.colour = (255,255,255)
button3Pressed = False
if button5.isOver(pos):
if button5Pressed == False:
if button3Pressed == True:
button3.colour = (255,255,255)
button3Pressed = False
if button7Pressed == True:
button7.colour = (255,255,255)
button7Pressed = False
if button9Pressed == True:
button9.colour = (255,255,255)
button9Pressed = False
button5.colour = (0,255,0)
print('Clicked $5')
button5Pressed = True
continue
else:
button5.colour = (255,255,255)
button5Pressed = False
if button7.isOver(pos):
if button7Pressed == False:
if button3Pressed == True:
button3.colour = (255,255,255)
button3Pressed = False
if button5Pressed == True:
button5.colour = (255,255,255)
button5Pressed = False
if button9Pressed == True:
button9.colour = (255,255,255)
button9Pressed = False
button7.colour = (0,255,0)
print('Clicked $7')
button7Pressed = True
continue
else:
button7.colour = (255,255,255)
button7Pressed = False
if button9.isOver(pos):
if button9Pressed == False:
if button3Pressed == True:
button3.colour = (255,255,255)
button3Pressed = False
if button5Pressed == True:
button5.colour = (255,255,255)
button5Pressed = False
if button7Pressed == True:
button7.colour = (255,255,255)
button7Pressed = False
button9.colour = (0,255,0)
print('Clicked $10')
button9Pressed = True
continue
else:
button9.colour = (255,255,255)
button9Pressed = False
#if mousebuttondown and map is already displayed
if mapCoor == True and event.type == pygame.MOUSEBUTTONDOWN:
mouseclick = mouseClick(screen)
if mouseclick[0]:
pygame.display.update()
x = mouseclick[1]
y = mouseclick[2]
print(x, ',', y)
#pygame.time.delay(2000)
mapCoor = False
sleep(1)
customisationMenu = True
#if prediction button is clicked
if mapCoor2 == True and event.type == pygame.MOUSEBUTTONDOWN:
mouseclick = mouseClick(screen)
if mouseclick[0]:
pygame.display.update()
x = mouseclick[1]
y = mouseclick[2]
print(x, ',', y)
#pygame.time.delay(2000)
mapCoor2 = False
sleep(1)
loading(screen)
easySearch = True
#things that happen after the next button is pressed
if nextButtonPressed:
sleep(1)
loading(screen)
user_prefList = []
user_cuisineList = []
user_budget = 0
if halalButtonPressed:
user_prefList.append("Halal")
if vegButtonPressed:
user_prefList.append("Vegetarian")
if nonhalalButtonPressed:
user_prefList.append("Non-Halal/Non-Vegetarian")
if koreanButtonPressed:
user_cuisineList.append("Korean")
if malayButtonPressed:
user_cuisineList.append("Malay")
if japanButtonPressed:
user_cuisineList.append("Japanese")
if chineseButtonPressed:
user_cuisineList.append("Chinese")
if indianButtonPressed:
user_cuisineList.append("Indian")
if westernButtonPressed:
user_cuisineList.append("Western")
if button3Pressed:
user_budget = 3
if button5Pressed:
user_budget = 5
if button7Pressed:
user_budget = 7
if button9Pressed:
            user_budget = 10 #matches the '$10' label on button9
#debug
print(user_cuisineList)
print(user_prefList)
print(user_budget)
finalID = final_list(user_budget, user_cuisineList, user_prefList)
print(finalID)
nearest_canteen = nearest_can(finalID, x, y)
print(nearest_canteen)
sleep(1)
nextButtonPressed = False
complicatedMenu = True
|
normal
|
{
"blob_id": "2a8032c23e3c7aa3a7b0593c79db7adbc0353f93",
"index": 2125,
"step-1": "<mask token>\n\n\nclass button:\n\n def __init__(self, colour, x, y, width, height, text=''):\n self.colour = colour\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.text = text\n\n def draw(self, win, outline=None):\n if outline:\n pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.\n width + 4, self.height + 4), 0)\n pygame.draw.rect(win, self.colour, (self.x, self.y, self.width,\n self.height), 0)\n if self.text != '':\n font = pygame.font.SysFont('calligrapher.ttf', 60)\n text = font.render(self.text, 1, (0, 0, 0))\n win.blit(text, (self.x + (self.width / 2 - text.get_width() / 2\n ), self.y + (self.height / 2 - text.get_height() / 2)))\n\n def isOver(self, pos):\n if pos[0] > self.x and pos[0] < self.x + self.width:\n if pos[1] > self.y and pos[1] < self.y + self.height:\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef mouseClick(screen):\n x, y = pygame.mouse.get_pos()\n if (x >= 65 and x <= 727) and (y >= 82 and y <= 618):\n pygame.draw.circle(screen, (255, 0, 150), (x, y), 15)\n return True, x, y\n else:\n print('Out of bounds!')\n return False, x, y\n\n\ndef skeleExit(win):\n aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n win.blit(aryadelight, (0, 0))\n pygame.display.update()\n xaxis = 100\n for i in range(1, 42):\n image = str(i) + '.png'\n skele = pygame.image.load(os.path.join(image))\n win.blit(skele, (250, 200))\n text('Exiting...', win, xaxis + 20, 600)\n pygame.display.update()\n sleep(0.09)\n\n\n<mask token>\n\n\ndef redrawMainWin(screen):\n aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(aryadelight, (0, 0))\n mapButton.draw(screen, (0, 0, 0))\n instructionText(\n '(Choose your cuisines, preferences and budget for the meal here!)',\n screen, 215, 320)\n predictButton.draw(screen, (0, 0, 0))\n instructionText('(Find the nearest canteen!)', screen, 132, 470)\n exitButton.draw(screen, (0, 0, 0))\n ice = pygame.image.load(os.path.join('ice.png'))\n screen.blit(ice, (500, 670))\n font = pygame.font.SysFont('verdana', 20)\n creator = font.render('Made by HweeHean X Arya', 1, (0, 0, 200))\n screen.blit(creator, (535, 670))\n\n\n<mask token>\n\n\ndef redrawSearchWin(screen, x, y):\n bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(bp, (0, 0))\n GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))\n screen.blit(GordonRamsay, (400, 100))\n distList = []\n for i in canteen_list:\n distList.append(i)\n print(distList)\n top3 = nearest_can(distList, x, y)\n print(top3)\n text('Nearest Canteen:', screen, 110, 400)\n yaxis = 490\n canteenCount = 1\n for k in top3:\n if canteenCount == 1:\n if k == 'Food Court 1':\n canteenPic = pygame.image.load(os.path.join('Canteen1.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 2':\n canteenPic = pygame.image.load(os.path.join('Canteen2.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 4':\n canteenPic = pygame.image.load(os.path.join('Canteen4.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 9':\n canteenPic = pygame.image.load(os.path.join('Canteen9.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 11':\n canteenPic = pygame.image.load(os.path.join('Canteen11.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 13':\n canteenPic = pygame.image.load(os.path.join('Canteen13.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 14':\n canteenPic = 
pygame.image.load(os.path.join('Canteen14.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 16':\n canteenPic = pygame.image.load(os.path.join('Canteen16.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Tamarind Food Court':\n canteenPic = pygame.image.load(os.path.join('Tamarind.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Pioneer Food Court':\n canteenPic = pygame.image.load(os.path.join('Pioneer.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthSpine.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Plaza':\n canteenPic = pygame.image.load(os.path.join(\n 'NorthSpinePlaza.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'South Spine Food Court':\n canteenPic = pygame.image.load(os.path.join(\n 'SouthSpineKoufuFoodCourt.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Quad Cafe':\n canteenPic = pygame.image.load(os.path.join('Quad.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Coffee Bean':\n canteenPic = pygame.image.load(os.path.join('Coffee.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Hill Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthHill.jpg'))\n screen.blit(canteenPic, (150, 200))\n text(str(canteenCount), screen, 110, yaxis)\n text('.', screen, 135, yaxis)\n text(k, screen, 150, yaxis)\n canteenCount += 1\n yaxis += 70\n return top3\n\n\n<mask token>\n\n\ndef final_list(user_budget, user_cuisine, user_preference):\n new_list = []\n for i in canteen_list:\n if user_budget >= canteen_list[i][1]:\n new_list.append(i)\n for c in user_cuisine:\n for i in canteen_list:\n if c in canteen_list[i][2]:\n new_list.append(i)\n for c in user_preference:\n for i in canteen_list:\n if c in canteen_list[i][4]:\n new_list.append(i)\n new_list = list(set(new_list))\n if len(new_list) == 0:\n for i in canteen_list:\n new_list.append(i)\n return new_list\n\n\ndef calc_dis(x1, y1, x2, y2):\n return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 1 / 2\n\n\ndef nearest_can(new_list, x, y):\n top3 = []\n copy_list = new_list.copy()\n while len(top3) != 3:\n j = copy_list[0]\n coor = canteen_list[j][5]\n Min = calc_dis(x, y, coor[0], coor[1])\n food_court = ''\n for k in copy_list:\n coor = canteen_list[k][5]\n dist = calc_dis(x, y, coor[0], coor[1])\n if Min >= dist:\n Min = dist\n food_court = k\n index = copy_list.index(food_court)\n copy_list.pop(index)\n top3.append(food_court)\n print(top3)\n return top3\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass button:\n\n def __init__(self, colour, x, y, width, height, text=''):\n self.colour = colour\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.text = text\n\n def draw(self, win, outline=None):\n if outline:\n pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.\n width + 4, self.height + 4), 0)\n pygame.draw.rect(win, self.colour, (self.x, self.y, self.width,\n self.height), 0)\n if self.text != '':\n font = pygame.font.SysFont('calligrapher.ttf', 60)\n text = font.render(self.text, 1, (0, 0, 0))\n win.blit(text, (self.x + (self.width / 2 - text.get_width() / 2\n ), self.y + (self.height / 2 - text.get_height() / 2)))\n\n def isOver(self, pos):\n if pos[0] > self.x and pos[0] < self.x + self.width:\n if pos[1] > self.y and pos[1] < self.y + self.height:\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef instructionText(text, win, x, y):\n font = pygame.font.SysFont('Arial', 20)\n phrase = font.render(text, 1, (0, 0, 0))\n win.blit(phrase, (x, y))\n\n\n<mask token>\n\n\ndef mouseClick(screen):\n x, y = pygame.mouse.get_pos()\n if (x >= 65 and x <= 727) and (y >= 82 and y <= 618):\n pygame.draw.circle(screen, (255, 0, 150), (x, y), 15)\n return True, x, y\n else:\n print('Out of bounds!')\n return False, x, y\n\n\ndef skeleExit(win):\n aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n win.blit(aryadelight, (0, 0))\n pygame.display.update()\n xaxis = 100\n for i in range(1, 42):\n image = str(i) + '.png'\n skele = pygame.image.load(os.path.join(image))\n win.blit(skele, (250, 200))\n text('Exiting...', win, xaxis + 20, 600)\n pygame.display.update()\n sleep(0.09)\n\n\n<mask token>\n\n\ndef redrawMainWin(screen):\n aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(aryadelight, (0, 0))\n mapButton.draw(screen, (0, 0, 0))\n instructionText(\n '(Choose your cuisines, preferences and budget for the meal here!)',\n screen, 215, 320)\n predictButton.draw(screen, (0, 0, 0))\n instructionText('(Find the nearest canteen!)', screen, 132, 470)\n exitButton.draw(screen, (0, 0, 0))\n ice = pygame.image.load(os.path.join('ice.png'))\n screen.blit(ice, (500, 670))\n font = pygame.font.SysFont('verdana', 20)\n creator = font.render('Made by HweeHean X Arya', 1, (0, 0, 200))\n screen.blit(creator, (535, 670))\n\n\n<mask token>\n\n\ndef redrawSearchWin(screen, x, y):\n bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(bp, (0, 0))\n GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))\n screen.blit(GordonRamsay, (400, 100))\n distList = []\n for i in canteen_list:\n distList.append(i)\n print(distList)\n top3 = nearest_can(distList, x, y)\n print(top3)\n text('Nearest Canteen:', screen, 110, 400)\n yaxis = 490\n canteenCount = 1\n for k in top3:\n if canteenCount == 1:\n if k == 'Food Court 1':\n canteenPic = pygame.image.load(os.path.join('Canteen1.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 2':\n canteenPic = pygame.image.load(os.path.join('Canteen2.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 4':\n canteenPic = pygame.image.load(os.path.join('Canteen4.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 9':\n canteenPic = pygame.image.load(os.path.join('Canteen9.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 11':\n canteenPic = pygame.image.load(os.path.join('Canteen11.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 13':\n canteenPic = 
pygame.image.load(os.path.join('Canteen13.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 14':\n canteenPic = pygame.image.load(os.path.join('Canteen14.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 16':\n canteenPic = pygame.image.load(os.path.join('Canteen16.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Tamarind Food Court':\n canteenPic = pygame.image.load(os.path.join('Tamarind.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Pioneer Food Court':\n canteenPic = pygame.image.load(os.path.join('Pioneer.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthSpine.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Plaza':\n canteenPic = pygame.image.load(os.path.join(\n 'NorthSpinePlaza.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'South Spine Food Court':\n canteenPic = pygame.image.load(os.path.join(\n 'SouthSpineKoufuFoodCourt.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Quad Cafe':\n canteenPic = pygame.image.load(os.path.join('Quad.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Coffee Bean':\n canteenPic = pygame.image.load(os.path.join('Coffee.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Hill Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthHill.jpg'))\n screen.blit(canteenPic, (150, 200))\n text(str(canteenCount), screen, 110, yaxis)\n text('.', screen, 135, yaxis)\n text(k, screen, 150, yaxis)\n canteenCount += 1\n yaxis += 70\n return top3\n\n\n<mask token>\n\n\ndef final_list(user_budget, user_cuisine, user_preference):\n new_list = []\n for i in canteen_list:\n if user_budget >= canteen_list[i][1]:\n new_list.append(i)\n for c in user_cuisine:\n for i in canteen_list:\n if c in canteen_list[i][2]:\n new_list.append(i)\n for c in user_preference:\n for i in canteen_list:\n if c in canteen_list[i][4]:\n new_list.append(i)\n new_list = list(set(new_list))\n if len(new_list) == 0:\n for i in canteen_list:\n new_list.append(i)\n return new_list\n\n\ndef calc_dis(x1, y1, x2, y2):\n return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 1 / 2\n\n\ndef nearest_can(new_list, x, y):\n top3 = []\n copy_list = new_list.copy()\n while len(top3) != 3:\n j = copy_list[0]\n coor = canteen_list[j][5]\n Min = calc_dis(x, y, coor[0], coor[1])\n food_court = ''\n for k in copy_list:\n coor = canteen_list[k][5]\n dist = calc_dis(x, y, coor[0], coor[1])\n if Min >= dist:\n Min = dist\n food_court = k\n index = copy_list.index(food_court)\n copy_list.pop(index)\n top3.append(food_court)\n print(top3)\n return top3\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass button:\n\n def __init__(self, colour, x, y, width, height, text=''):\n self.colour = colour\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.text = text\n\n def draw(self, win, outline=None):\n if outline:\n pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.\n width + 4, self.height + 4), 0)\n pygame.draw.rect(win, self.colour, (self.x, self.y, self.width,\n self.height), 0)\n if self.text != '':\n font = pygame.font.SysFont('calligrapher.ttf', 60)\n text = font.render(self.text, 1, (0, 0, 0))\n win.blit(text, (self.x + (self.width / 2 - text.get_width() / 2\n ), self.y + (self.height / 2 - text.get_height() / 2)))\n\n def isOver(self, pos):\n if pos[0] > self.x and pos[0] < self.x + self.width:\n if pos[1] > self.y and pos[1] < self.y + self.height:\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef text(text, win, x, y):\n font = pygame.font.SysFont('freesansbold.ttf', 50)\n phrase = font.render(text, 1, (0, 0, 0))\n win.blit(phrase, (x, y))\n\n\ndef instructionText(text, win, x, y):\n font = pygame.font.SysFont('Arial', 20)\n phrase = font.render(text, 1, (0, 0, 0))\n win.blit(phrase, (x, y))\n\n\n<mask token>\n\n\ndef mouseClick(screen):\n x, y = pygame.mouse.get_pos()\n if (x >= 65 and x <= 727) and (y >= 82 and y <= 618):\n pygame.draw.circle(screen, (255, 0, 150), (x, y), 15)\n return True, x, y\n else:\n print('Out of bounds!')\n return False, x, y\n\n\ndef skeleExit(win):\n aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n win.blit(aryadelight, (0, 0))\n pygame.display.update()\n xaxis = 100\n for i in range(1, 42):\n image = str(i) + '.png'\n skele = pygame.image.load(os.path.join(image))\n win.blit(skele, (250, 200))\n text('Exiting...', win, xaxis + 20, 600)\n pygame.display.update()\n sleep(0.09)\n\n\n<mask token>\n\n\ndef redrawMap(screen):\n NTUmap = pygame.image.load(os.path.join('NTUMap.jpg'))\n screen.blit(NTUmap, (0, 0))\n for x in range(50, 900, 50):\n pygame.draw.rect(screen, (255, 0, 0), (x, 0, 1, 700), 0)\n for y in range(50, 700, 50):\n pygame.draw.rect(screen, (255, 0, 0), (0, y, 900, 1), 0)\n text('Please click on your current location!', screen, 200, 100)\n\n\n<mask token>\n\n\ndef redrawMainWin(screen):\n aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(aryadelight, (0, 0))\n mapButton.draw(screen, (0, 0, 0))\n instructionText(\n '(Choose your cuisines, preferences and budget for the meal here!)',\n screen, 215, 320)\n predictButton.draw(screen, (0, 0, 0))\n instructionText('(Find the nearest canteen!)', screen, 132, 470)\n exitButton.draw(screen, (0, 0, 0))\n ice = pygame.image.load(os.path.join('ice.png'))\n screen.blit(ice, (500, 670))\n font = pygame.font.SysFont('verdana', 20)\n creator = font.render('Made by HweeHean X Arya', 1, (0, 0, 200))\n screen.blit(creator, (535, 670))\n\n\n<mask token>\n\n\ndef redrawSearchWin(screen, x, y):\n bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(bp, (0, 0))\n GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))\n screen.blit(GordonRamsay, (400, 100))\n distList = []\n for i in canteen_list:\n distList.append(i)\n print(distList)\n top3 = nearest_can(distList, x, y)\n print(top3)\n text('Nearest Canteen:', screen, 110, 400)\n yaxis = 490\n canteenCount = 1\n for k in top3:\n if canteenCount == 1:\n if k == 'Food Court 1':\n canteenPic = pygame.image.load(os.path.join('Canteen1.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food 
Court 2':\n canteenPic = pygame.image.load(os.path.join('Canteen2.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 4':\n canteenPic = pygame.image.load(os.path.join('Canteen4.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 9':\n canteenPic = pygame.image.load(os.path.join('Canteen9.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 11':\n canteenPic = pygame.image.load(os.path.join('Canteen11.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 13':\n canteenPic = pygame.image.load(os.path.join('Canteen13.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 14':\n canteenPic = pygame.image.load(os.path.join('Canteen14.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 16':\n canteenPic = pygame.image.load(os.path.join('Canteen16.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Tamarind Food Court':\n canteenPic = pygame.image.load(os.path.join('Tamarind.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Pioneer Food Court':\n canteenPic = pygame.image.load(os.path.join('Pioneer.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthSpine.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Plaza':\n canteenPic = pygame.image.load(os.path.join(\n 'NorthSpinePlaza.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'South Spine Food Court':\n canteenPic = pygame.image.load(os.path.join(\n 'SouthSpineKoufuFoodCourt.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Quad Cafe':\n canteenPic = pygame.image.load(os.path.join('Quad.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Coffee Bean':\n canteenPic = pygame.image.load(os.path.join('Coffee.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Hill Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthHill.jpg'))\n screen.blit(canteenPic, (150, 200))\n text(str(canteenCount), screen, 110, yaxis)\n text('.', screen, 135, yaxis)\n text(k, screen, 150, yaxis)\n canteenCount += 1\n yaxis += 70\n return top3\n\n\ndef complicatedSearchWin(screen, top3):\n bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(bp, (0, 0))\n GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))\n screen.blit(GordonRamsay, (400, 100))\n text('Nearest Canteen:', screen, 110, 400)\n yaxis = 490\n canteenCount = 1\n for k in top3:\n if canteenCount == 1:\n if k == 'Food Court 1':\n canteenPic = pygame.image.load(os.path.join('Canteen1.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 2':\n canteenPic = pygame.image.load(os.path.join('Canteen2.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 4':\n canteenPic = pygame.image.load(os.path.join('Canteen4.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 9':\n canteenPic = pygame.image.load(os.path.join('Canteen9.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 11':\n canteenPic = pygame.image.load(os.path.join('Canteen11.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 13':\n canteenPic = pygame.image.load(os.path.join('Canteen13.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 14':\n canteenPic = pygame.image.load(os.path.join('Canteen14.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 16':\n canteenPic = pygame.image.load(os.path.join('Canteen16.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Tamarind Food Court':\n canteenPic = 
pygame.image.load(os.path.join('Tamarind.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Pioneer Food Court':\n canteenPic = pygame.image.load(os.path.join('Pioneer.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthSpine.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Plaza':\n canteenPic = pygame.image.load(os.path.join(\n 'NorthSpinePlaza.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'South Spine Food Court':\n canteenPic = pygame.image.load(os.path.join(\n 'SouthSpineKoufuFoodCourt.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Quad Cafe':\n canteenPic = pygame.image.load(os.path.join('Quad.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Coffee Bean':\n canteenPic = pygame.image.load(os.path.join('Coffee.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Hill Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthHill.jpg'))\n screen.blit(canteenPic, (150, 200))\n text(str(canteenCount), screen, 110, yaxis)\n text('.', screen, 135, yaxis)\n text(k, screen, 150, yaxis)\n canteenCount += 1\n yaxis += 70\n\n\n<mask token>\n\n\ndef final_list(user_budget, user_cuisine, user_preference):\n new_list = []\n for i in canteen_list:\n if user_budget >= canteen_list[i][1]:\n new_list.append(i)\n for c in user_cuisine:\n for i in canteen_list:\n if c in canteen_list[i][2]:\n new_list.append(i)\n for c in user_preference:\n for i in canteen_list:\n if c in canteen_list[i][4]:\n new_list.append(i)\n new_list = list(set(new_list))\n if len(new_list) == 0:\n for i in canteen_list:\n new_list.append(i)\n return new_list\n\n\ndef calc_dis(x1, y1, x2, y2):\n return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 1 / 2\n\n\ndef nearest_can(new_list, x, y):\n top3 = []\n copy_list = new_list.copy()\n while len(top3) != 3:\n j = copy_list[0]\n coor = canteen_list[j][5]\n Min = calc_dis(x, y, coor[0], coor[1])\n food_court = ''\n for k in copy_list:\n coor = canteen_list[k][5]\n dist = calc_dis(x, y, coor[0], coor[1])\n if Min >= dist:\n Min = dist\n food_court = k\n index = copy_list.index(food_court)\n copy_list.pop(index)\n top3.append(food_court)\n print(top3)\n return top3\n\n\n<mask token>\n",
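The sixteen-branch if k == ... image chain above reappears verbatim in redrawSearchWin, complicatedSearchWin and the later steps, with every branch sitting under the if canteenCount == 1: guard so that only the top-ranked canteen's picture is drawn at (150, 200). A data-driven sketch of the same lookup; the filenames are taken from the record itself, and only the dict and helper names are new:

import os
import pygame

CANTEEN_IMAGES = {
    'Food Court 1': 'Canteen1.jpg', 'Food Court 2': 'Canteen2.png',
    'Food Court 4': 'Canteen4.png', 'Food Court 9': 'Canteen9.png',
    'Food Court 11': 'Canteen11.png', 'Food Court 13': 'Canteen13.png',
    'Food Court 14': 'Canteen14.png', 'Food Court 16': 'Canteen16.png',
    'Tamarind Food Court': 'Tamarind.jpg', 'Pioneer Food Court': 'Pioneer.png',
    'North Spine Food Court': 'NorthSpine.jpg',
    'North Spine Plaza': 'NorthSpinePlaza.jpg',
    'South Spine Food Court': 'SouthSpineKoufuFoodCourt.png',
    'Quad Cafe': 'Quad.jpg', 'Coffee Bean': 'Coffee.jpg',
    'North Hill Food Court': 'NorthHill.jpg',
}

def draw_top_canteen(screen, name, pos=(150, 200)):
    # Replaces the whole if-chain: look the image up, draw it if registered.
    filename = CANTEEN_IMAGES.get(name)
    if filename is not None:
        screen.blit(pygame.image.load(os.path.join(filename)), pos)

With this in place the loop body reduces to: if canteenCount == 1: draw_top_canteen(screen, k).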
"step-4": "import pygame\nimport os\nfrom time import sleep\nscreen = pygame.display.set_mode((900, 700))\nscreen.fill((255, 255, 255))\npygame.display.set_caption('NTUFOODIERECOMMENDSYSTEM')\n<mask token>\ncanteen_list = {'Food Court 1': [12, 3.5, ['Korean', 'Japanese', 'Western'],\n 2100, ['Halal', 'Non-Halal/Non-Vegetarian'], (442, 473)],\n 'Food Court 2': [10, 3.6, ['Korean', 'Chinese', 'Malay'], 2100, [\n 'Halal', 'Vegetarian', 'Non-Halal/Non-Vegetarian'], (477, 409)],\n 'Food Court 4': [10, 3, ['Chinese', 'Western'], 2100, [\n 'Non-Halal/Non-Vegetarian'], (358, 526)], 'Food Court 9': [10, 3.5, [\n 'Chinese'], 2100, ['Halal', 'Vegetarian', 'Non-Halal/Non-Vegetarian'],\n (582, 288)], 'Food Court 11': [10, 2.5, ['Chinese', 'Indian',\n 'Japanese', 'Western'], 2100, ['Halal', 'Vegetarian',\n 'Non-Halal/Non-Vegetarian'], (682, 243)], 'Food Court 13': [9, 2, [\n 'Western', 'Korean', 'Japanese', 'Chinese'], 2100, ['Halal',\n 'Vegetarian', 'Non-Halal/Non-Vegetarian'], (445, 176)], 'Food Court 14':\n [8, 3, ['Western', 'Chinese', 'Korean', 'Malay'], 2100, ['Halal',\n 'Vegetarian', 'Non-Halal/Non-Vegetarian'], (509, 182)], 'Food Court 16':\n [10, 3.3, ['Japanese', 'Chinese', 'Korean', 'Indian'], 2100, ['Halal',\n 'Vegetarian', 'Non-Halal/Non-Vegetarian'], (405, 221)],\n 'Tamarind Food Court': [10, 3, ['Malay', 'Chinese', 'Korean', 'Western'\n ], 2100, ['Halal', 'Non-Halal', 'Vegetarian',\n 'Non-Halal/Non-Vegetarian'], (627, 200)], 'Pioneer Food Court': [20, \n 2.3, ['Thai', 'Chinese'], 0, ['Vegetarian', 'Non-Halal/Non-Vegetarian'],\n (497, 561)], 'North Spine Food Court': [10, 2.5, ['Korean', 'Japanese',\n 'Chinese', 'Western', 'Malay'], 2100, ['Vegetarian',\n 'Non-Halal/Non-Vegetarian'], (275, 293)], 'North Spine Plaza': [10, 4,\n ['Western', 'Korean'], 2130, ['Vegetarian', 'Halal',\n 'Non-Halal/Non-Vegetarian'], (287, 339)], 'South Spine Food Court': [10,\n 2, ['Chinese', 'Malay', 'Korean', 'Japanese', 'Western'], 2100, [\n 'Vegetarian', 'Halal', 'Non-Halal/Non-Vegetarian'], (227, 496)],\n 'Quad Cafe': [10, 2.4, ['Korean', 'Chinese', 'Indian', 'Malay'], 2100,\n ['Vegetarian', 'Halal', 'Non-Halal/Non-Vegetarian'], (224, 351)],\n 'Coffee Bean': [20, 4, ['Western'], 2000, ['Vegetarian', 'Halal',\n 'Non-Halal/Non-Vegetarian'], (219, 389)], 'North Hill Food Court': [10,\n 3.8, ['Chinese', 'Malay', 'Indian'], 2100, ['Vegetarian', 'Halal',\n 'Non-Halal/Non-Vegetarian'], (720, 314)]}\n<mask token>\n\n\nclass button:\n\n def __init__(self, colour, x, y, width, height, text=''):\n self.colour = colour\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.text = text\n\n def draw(self, win, outline=None):\n if outline:\n pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.\n width + 4, self.height + 4), 0)\n pygame.draw.rect(win, self.colour, (self.x, self.y, self.width,\n self.height), 0)\n if self.text != '':\n font = pygame.font.SysFont('calligrapher.ttf', 60)\n text = font.render(self.text, 1, (0, 0, 0))\n win.blit(text, (self.x + (self.width / 2 - text.get_width() / 2\n ), self.y + (self.height / 2 - text.get_height() / 2)))\n\n def isOver(self, pos):\n if pos[0] > self.x and pos[0] < self.x + self.width:\n if pos[1] > self.y and pos[1] < self.y + self.height:\n return True\n else:\n return False\n\n\n<mask token>\n\n\ndef text(text, win, x, y):\n font = pygame.font.SysFont('freesansbold.ttf', 50)\n phrase = font.render(text, 1, (0, 0, 0))\n win.blit(phrase, (x, y))\n\n\ndef instructionText(text, win, x, y):\n font = pygame.font.SysFont('Arial', 20)\n phrase = 
font.render(text, 1, (0, 0, 0))\n win.blit(phrase, (x, y))\n\n\ndef header(text, win, x, y):\n font = pygame.font.SysFont('TimesNewRoman', 70)\n phrase = font.render(text, 1, (0, 0, 0))\n win.blit(phrase, (x, y))\n\n\ndef mouseClick(screen):\n x, y = pygame.mouse.get_pos()\n if (x >= 65 and x <= 727) and (y >= 82 and y <= 618):\n pygame.draw.circle(screen, (255, 0, 150), (x, y), 15)\n return True, x, y\n else:\n print('Out of bounds!')\n return False, x, y\n\n\ndef skeleExit(win):\n aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n win.blit(aryadelight, (0, 0))\n pygame.display.update()\n xaxis = 100\n for i in range(1, 42):\n image = str(i) + '.png'\n skele = pygame.image.load(os.path.join(image))\n win.blit(skele, (250, 200))\n text('Exiting...', win, xaxis + 20, 600)\n pygame.display.update()\n sleep(0.09)\n\n\ndef loading(win):\n x = 0\n while x < 3:\n load0 = pygame.image.load(os.path.join('load0.png'))\n win.blit(load0, (0, 0))\n pygame.display.update()\n sleep(0.3)\n load1 = pygame.image.load(os.path.join('load1.png'))\n win.blit(load1, (0, 0))\n pygame.display.update()\n sleep(0.3)\n load2 = pygame.image.load(os.path.join('load2.png'))\n win.blit(load2, (0, 0))\n pygame.display.update()\n sleep(0.3)\n load3 = pygame.image.load(os.path.join('load3.png'))\n win.blit(load3, (0, 0))\n pygame.display.update()\n sleep(0.3)\n x += 1\n\n\ndef redrawMap(screen):\n NTUmap = pygame.image.load(os.path.join('NTUMap.jpg'))\n screen.blit(NTUmap, (0, 0))\n for x in range(50, 900, 50):\n pygame.draw.rect(screen, (255, 0, 0), (x, 0, 1, 700), 0)\n for y in range(50, 700, 50):\n pygame.draw.rect(screen, (255, 0, 0), (0, y, 900, 1), 0)\n text('Please click on your current location!', screen, 200, 100)\n\n\ndef redrawGPSMap(screen, top3, x, y):\n NTUmap = pygame.image.load(os.path.join('NTUMap.jpg'))\n screen.blit(NTUmap, (0, 0))\n redGPS = pygame.image.load(os.path.join('redgps.png'))\n screen.blit(redGPS, (x - 16, y - 32))\n instructionText('You are currently at this position.', screen, x + 4, y -\n 10)\n counter = 1\n for i in top3:\n coor = canteen_list[i][5]\n if counter == 1:\n blueGPS = pygame.image.load(os.path.join('bluegps.png'))\n screen.blit(blueGPS, (coor[0] - 12, coor[1] - 24))\n instructionText(i, screen, coor[0] - 24, coor[1])\n pass\n if counter == 2:\n blackGPS = pygame.image.load(os.path.join('blackgps.png'))\n screen.blit(blackGPS, (coor[0] - 12, coor[1] - 24))\n instructionText(i, screen, coor[0] - 24, coor[1])\n pass\n if counter == 3:\n yellowGPS = pygame.image.load(os.path.join('yellowgps.png'))\n screen.blit(yellowGPS, (coor[0] - 12, coor[1] - 24))\n instructionText(i, screen, coor[0] - 24, coor[1])\n pass\n counter += 1\n restartButton.draw(screen, (0, 0, 0))\n\n\ndef redrawMainWin(screen):\n aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(aryadelight, (0, 0))\n mapButton.draw(screen, (0, 0, 0))\n instructionText(\n '(Choose your cuisines, preferences and budget for the meal here!)',\n screen, 215, 320)\n predictButton.draw(screen, (0, 0, 0))\n instructionText('(Find the nearest canteen!)', screen, 132, 470)\n exitButton.draw(screen, (0, 0, 0))\n ice = pygame.image.load(os.path.join('ice.png'))\n screen.blit(ice, (500, 670))\n font = pygame.font.SysFont('verdana', 20)\n creator = font.render('Made by HweeHean X Arya', 1, (0, 0, 200))\n screen.blit(creator, (535, 670))\n\n\ndef redrawCustWin(screen):\n bp = pygame.image.load(os.path.join('gradient.jpg'))\n screen.blit(bp, (0, 0))\n instructionText('Left click again to 
reset!', screen, 300, 20)\n text('Please select your food preference: ', screen, 100, 50)\n halalButton.draw(screen, (0, 0, 0))\n vegButton.draw(screen, (0, 0, 0))\n nonhalalButton.draw(screen, (0, 0, 0))\n text('Please select your cuisine type: ', screen, 100, 200)\n koreanButton.draw(screen, (0, 0, 0))\n malayButton.draw(screen, (0, 0, 0))\n japanButton.draw(screen, (0, 0, 0))\n chineseButton.draw(screen, (0, 0, 0))\n indianButton.draw(screen, (0, 0, 0))\n westernButton.draw(screen, (0, 0, 0))\n text('Please select your maximum budget: ', screen, 100, 430)\n button3.draw(screen, (0, 0, 0))\n button5.draw(screen, (0, 0, 0))\n button7.draw(screen, (0, 0, 0))\n button9.draw(screen, (0, 0, 0))\n nextButton.draw(screen, (0, 0, 0))\n\n\ndef redrawSearchWin(screen, x, y):\n bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(bp, (0, 0))\n GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))\n screen.blit(GordonRamsay, (400, 100))\n distList = []\n for i in canteen_list:\n distList.append(i)\n print(distList)\n top3 = nearest_can(distList, x, y)\n print(top3)\n text('Nearest Canteen:', screen, 110, 400)\n yaxis = 490\n canteenCount = 1\n for k in top3:\n if canteenCount == 1:\n if k == 'Food Court 1':\n canteenPic = pygame.image.load(os.path.join('Canteen1.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 2':\n canteenPic = pygame.image.load(os.path.join('Canteen2.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 4':\n canteenPic = pygame.image.load(os.path.join('Canteen4.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 9':\n canteenPic = pygame.image.load(os.path.join('Canteen9.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 11':\n canteenPic = pygame.image.load(os.path.join('Canteen11.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 13':\n canteenPic = pygame.image.load(os.path.join('Canteen13.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 14':\n canteenPic = pygame.image.load(os.path.join('Canteen14.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 16':\n canteenPic = pygame.image.load(os.path.join('Canteen16.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Tamarind Food Court':\n canteenPic = pygame.image.load(os.path.join('Tamarind.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Pioneer Food Court':\n canteenPic = pygame.image.load(os.path.join('Pioneer.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthSpine.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Plaza':\n canteenPic = pygame.image.load(os.path.join(\n 'NorthSpinePlaza.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'South Spine Food Court':\n canteenPic = pygame.image.load(os.path.join(\n 'SouthSpineKoufuFoodCourt.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Quad Cafe':\n canteenPic = pygame.image.load(os.path.join('Quad.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Coffee Bean':\n canteenPic = pygame.image.load(os.path.join('Coffee.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Hill Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthHill.jpg'))\n screen.blit(canteenPic, (150, 200))\n text(str(canteenCount), screen, 110, yaxis)\n text('.', screen, 135, yaxis)\n text(k, screen, 150, yaxis)\n canteenCount += 1\n yaxis += 70\n return top3\n\n\ndef complicatedSearchWin(screen, top3):\n bp = 
pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))\n screen.blit(bp, (0, 0))\n GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))\n screen.blit(GordonRamsay, (400, 100))\n text('Nearest Canteen:', screen, 110, 400)\n yaxis = 490\n canteenCount = 1\n for k in top3:\n if canteenCount == 1:\n if k == 'Food Court 1':\n canteenPic = pygame.image.load(os.path.join('Canteen1.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 2':\n canteenPic = pygame.image.load(os.path.join('Canteen2.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 4':\n canteenPic = pygame.image.load(os.path.join('Canteen4.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 9':\n canteenPic = pygame.image.load(os.path.join('Canteen9.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 11':\n canteenPic = pygame.image.load(os.path.join('Canteen11.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 13':\n canteenPic = pygame.image.load(os.path.join('Canteen13.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 14':\n canteenPic = pygame.image.load(os.path.join('Canteen14.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Food Court 16':\n canteenPic = pygame.image.load(os.path.join('Canteen16.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Tamarind Food Court':\n canteenPic = pygame.image.load(os.path.join('Tamarind.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Pioneer Food Court':\n canteenPic = pygame.image.load(os.path.join('Pioneer.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthSpine.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Spine Plaza':\n canteenPic = pygame.image.load(os.path.join(\n 'NorthSpinePlaza.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'South Spine Food Court':\n canteenPic = pygame.image.load(os.path.join(\n 'SouthSpineKoufuFoodCourt.png'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Quad Cafe':\n canteenPic = pygame.image.load(os.path.join('Quad.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'Coffee Bean':\n canteenPic = pygame.image.load(os.path.join('Coffee.jpg'))\n screen.blit(canteenPic, (150, 200))\n if k == 'North Hill Food Court':\n canteenPic = pygame.image.load(os.path.join('NorthHill.jpg'))\n screen.blit(canteenPic, (150, 200))\n text(str(canteenCount), screen, 110, yaxis)\n text('.', screen, 135, yaxis)\n text(k, screen, 150, yaxis)\n canteenCount += 1\n yaxis += 70\n\n\n<mask token>\n\n\ndef final_list(user_budget, user_cuisine, user_preference):\n new_list = []\n for i in canteen_list:\n if user_budget >= canteen_list[i][1]:\n new_list.append(i)\n for c in user_cuisine:\n for i in canteen_list:\n if c in canteen_list[i][2]:\n new_list.append(i)\n for c in user_preference:\n for i in canteen_list:\n if c in canteen_list[i][4]:\n new_list.append(i)\n new_list = list(set(new_list))\n if len(new_list) == 0:\n for i in canteen_list:\n new_list.append(i)\n return new_list\n\n\ndef calc_dis(x1, y1, x2, y2):\n return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 1 / 2\n\n\ndef nearest_can(new_list, x, y):\n top3 = []\n copy_list = new_list.copy()\n while len(top3) != 3:\n j = copy_list[0]\n coor = canteen_list[j][5]\n Min = calc_dis(x, y, coor[0], coor[1])\n food_court = ''\n for k in copy_list:\n coor = canteen_list[k][5]\n dist = calc_dis(x, y, coor[0], coor[1])\n if Min >= dist:\n Min = dist\n food_court = k\n index = copy_list.index(food_court)\n 
copy_list.pop(index)\n top3.append(food_court)\n print(top3)\n return top3\n\n\n<mask token>\nmapButton = button((255, 255, 255), 200, 250, 500, 100, 'Canteen Customisation'\n )\npredictButton = button((255, 255, 255), 100, 400, 300, 100, 'Prediction')\nexitButton = button((255, 255, 255), 500, 400, 300, 100, 'Exit')\nhalalButton = button((255, 255, 255), 50, 120, 250, 50, 'Halal')\nvegButton = button((255, 255, 255), 320, 120, 250, 50, 'Vegetarian')\nnonhalalButton = button((255, 255, 255), 590, 120, 250, 50, 'Non-Halal')\nkoreanButton = button((255, 255, 255), 50, 270, 250, 50, 'Korean')\nmalayButton = button((255, 255, 255), 320, 270, 250, 50, 'Malay')\njapanButton = button((255, 255, 255), 590, 270, 250, 50, 'Japanese')\nchineseButton = button((255, 255, 255), 50, 340, 250, 50, 'Chinese')\nindianButton = button((255, 255, 255), 320, 340, 250, 50, 'Indian')\nwesternButton = button((255, 255, 255), 590, 340, 250, 50, 'Western')\nbutton3 = button((255, 255, 255), 235, 490, 70, 50, '$3')\nbutton5 = button((255, 255, 255), 355, 490, 70, 50, '$5')\nbutton7 = button((255, 255, 255), 475, 490, 70, 50, '$7')\nbutton9 = button((255, 255, 255), 595, 490, 70, 50, '$10')\nnextButton = button((255, 255, 255), 730, 580, 120, 70, 'Next')\ngpsButton = button((255, 255, 255), 700, 600, 170, 50, 'to Map')\nrestartButton = button((255, 255, 255), 700, 600, 190, 50, 'Restart?')\n<mask token>\nhalalButtonPressed = False\nvegButtonPressed = False\nnonhalalButtonPressed = False\nkoreanButtonPressed = False\nmalayButtonPressed = False\njapanButtonPressed = False\nchineseButtonPressed = False\nindianButtonPressed = False\nwesternButtonPressed = False\nbutton3Pressed = False\nbutton5Pressed = False\nbutton7Pressed = False\nbutton9Pressed = False\nnextButtonPressed = False\ngpsButtonPressed = False\ncheckButton = True\nmapCoor = False\ncustomisationMenu = False\nmapCoor2 = False\neasySearch = False\ncomplicatedMenu = False\noneTime = True\n<mask token>\npygame.init()\nrun = True\nclock = pygame.time.Clock()\nwhile run:\n if checkButton:\n redrawMainWin(screen)\n if customisationMenu:\n redrawCustWin(screen)\n if easySearch:\n if oneTime:\n nearest_canteen = redrawSearchWin(screen, x, y)\n sleep(2)\n oneTime = False\n gpsButton.draw(screen, (0, 0, 0))\n if complicatedMenu:\n if oneTime:\n complicatedSearchWin(screen, nearest_canteen)\n sleep(2)\n oneTime = False\n gpsButton.draw(screen, (0, 0, 0))\n if gpsButtonPressed == True:\n redrawGPSMap(screen, nearest_canteen, x, y)\n pygame.display.update()\n clock.tick(30)\n for event in pygame.event.get():\n pos = pygame.mouse.get_pos()\n if event.type == pygame.QUIT:\n run = False\n pygame.quit()\n if gpsButtonPressed:\n if event.type == pygame.MOUSEBUTTONDOWN:\n if restartButton.isOver(pos):\n restartButton.colour = 50, 50, 50\n restartButton.draw(screen, (0, 0, 0))\n pygame.display.update()\n print('clicked the restart button')\n halalButtonPressed = False\n vegButtonPressed = False\n nonhalalButtonPressed = False\n koreanButtonPressed = False\n malayButtonPressed = False\n japanButtonPressed = False\n chineseButtonPressed = False\n indianButtonPressed = False\n westernButtonPressed = False\n button3Pressed = False\n button5Pressed = False\n button7Pressed = False\n button9Pressed = False\n nextButtonPressed = False\n gpsButtonPressed = False\n checkButton = True\n mapCoor = False\n customisationMenu = False\n mapCoor2 = False\n easySearch = False\n complicatedMenu = False\n oneTime = True\n if event.type == pygame.MOUSEMOTION:\n if restartButton.isOver(pos):\n 
restartButton.colour = 0, 255, 0\n continue\n else:\n restartButton.colour = 255, 255, 255\n continue\n if easySearch == True or complicatedMenu == True:\n if event.type == pygame.MOUSEBUTTONDOWN:\n if gpsButton.isOver(pos):\n gpsButton.colour = 50, 50, 50\n gpsButton.draw(screen, (0, 0, 0))\n pygame.display.update()\n print('clicked gps button')\n gpsButtonPressed = True\n easySearch = False\n complicatedMenu = False\n continue\n if event.type == pygame.MOUSEMOTION:\n if gpsButton.isOver(pos):\n gpsButton.colour = 0, 255, 0\n continue\n else:\n gpsButton.colour = 255, 255, 255\n continue\n if checkButton:\n if event.type == pygame.MOUSEBUTTONDOWN:\n if mapButton.isOver(pos):\n mapButton.colour = 0, 255, 0\n redrawMainWin(screen)\n pygame.display.update()\n print('clicked map button')\n sleep(0.5)\n redrawMap(screen)\n checkButton = False\n mapCoor = True\n continue\n if predictButton.isOver(pos):\n predictButton.colour = 0, 255, 0\n redrawMainWin(screen)\n pygame.display.update()\n print('clicked predict button')\n sleep(0.5)\n redrawMap(screen)\n checkButton = False\n mapCoor2 = True\n continue\n if exitButton.isOver(pos):\n exitButton.colour = 0, 255, 0\n print('Exiting...')\n skeleExit(screen)\n pygame.quit()\n run = False\n exit()\n if event.type == pygame.MOUSEMOTION:\n if mapButton.isOver(pos):\n mapButton.colour = 255, 0, 0\n else:\n mapButton.colour = 255, 255, 255\n if predictButton.isOver(pos):\n predictButton.colour = 255, 0, 0\n else:\n predictButton.colour = 255, 255, 255\n if exitButton.isOver(pos):\n exitButton.colour = 255, 0, 0\n else:\n exitButton.colour = 255, 255, 255\n if customisationMenu:\n if event.type == pygame.MOUSEMOTION:\n if nextButton.isOver(pos):\n nextButton.colour = 0, 0, 255\n else:\n nextButton.colour = 255, 255, 255\n continue\n if event.type == pygame.MOUSEBUTTONDOWN:\n if nextButton.isOver(pos):\n nextButton.colour = 255, 255, 0\n nextButtonPressed = True\n customisationMenu = False\n continue\n if halalButton.isOver(pos):\n if halalButtonPressed == False:\n if nonhalalButtonPressed:\n nonhalalButton.colour = 255, 255, 255\n nonhalalButtonPressed = False\n halalButton.colour = 0, 255, 0\n print('clicked Halal button')\n halalButtonPressed = True\n continue\n else:\n halalButton.colour = 255, 255, 255\n halalButtonPressed = False\n continue\n if vegButton.isOver(pos):\n if vegButtonPressed == False:\n if nonhalalButtonPressed:\n nonhalalButton.colour = 255, 255, 255\n nonhalalButtonPressed = False\n vegButton.colour = 0, 255, 0\n print('clicked Vegetarian button')\n vegButtonPressed = True\n continue\n else:\n vegButton.colour = 255, 255, 255\n vegButtonPressed = False\n continue\n if nonhalalButton.isOver(pos):\n if nonhalalButtonPressed == False:\n if halalButtonPressed:\n halalButton.colour = 255, 255, 255\n halalButtonPressed = False\n if vegButtonPressed:\n vegButton.colour = 255, 255, 255\n vegButtonPressed = False\n nonhalalButton.colour = 0, 255, 0\n print('clicked non-halal button')\n nonhalalButtonPressed = True\n continue\n else:\n nonhalalButton.colour = 255, 255, 255\n nonhalalButtonPressed = False\n if koreanButton.isOver(pos):\n if koreanButtonPressed == False:\n koreanButton.colour = 0, 255, 0\n print('clicked korean button')\n koreanButtonPressed = True\n continue\n else:\n koreanButton.colour = 255, 255, 255\n koreanButtonPressed = False\n if malayButton.isOver(pos):\n if malayButtonPressed == False:\n malayButton.colour = 0, 255, 0\n print('clicked Malay button')\n malayButtonPressed = True\n continue\n else:\n malayButton.colour = 
255, 255, 255\n malayButtonPressed = False\n if japanButton.isOver(pos):\n if japanButtonPressed == False:\n japanButton.colour = 0, 255, 0\n print('clicked japan button')\n japanButtonPressed = True\n continue\n else:\n japanButton.colour = 255, 255, 255\n japanButtonPressed = False\n if chineseButton.isOver(pos):\n if chineseButtonPressed == False:\n chineseButton.colour = 0, 255, 0\n print('clicked chinese button')\n chineseButtonPressed = True\n continue\n else:\n chineseButton.colour = 255, 255, 255\n chineseButtonPressed = False\n if indianButton.isOver(pos):\n if indianButtonPressed == False:\n indianButton.colour = 0, 255, 0\n print('clicked indian button')\n indianButtonPressed = True\n continue\n else:\n indianButton.colour = 255, 255, 255\n indianButtonPressed = False\n if westernButton.isOver(pos):\n if westernButtonPressed == False:\n westernButton.colour = 0, 255, 0\n print('clicked western button')\n westernButtonPressed = True\n continue\n else:\n westernButton.colour = 255, 255, 255\n westernButtonPressed = False\n if button3.isOver(pos):\n if button3Pressed == False:\n if button5Pressed == True:\n button5.colour = 255, 255, 255\n button5Pressed = False\n if button7Pressed == True:\n button7.colour = 255, 255, 255\n button7Pressed = False\n if button9Pressed == True:\n button9.colour = 255, 255, 255\n button9Pressed = False\n button3.colour = 0, 255, 0\n print('clicked $3')\n button3Pressed = True\n continue\n else:\n button3.colour = 255, 255, 255\n button3Pressed = False\n if button5.isOver(pos):\n if button5Pressed == False:\n if button3Pressed == True:\n button3.colour = 255, 255, 255\n button3Pressed = False\n if button7Pressed == True:\n button7.colour = 255, 255, 255\n button7Pressed = False\n if button9Pressed == True:\n button9.colour = 255, 255, 255\n button9Pressed = False\n button5.colour = 0, 255, 0\n print('Clicked $5')\n button5Pressed = True\n continue\n else:\n button5.colour = 255, 255, 255\n button5Pressed = False\n if button7.isOver(pos):\n if button7Pressed == False:\n if button3Pressed == True:\n button3.colour = 255, 255, 255\n button3Pressed = False\n if button5Pressed == True:\n button5.colour = 255, 255, 255\n button5Pressed = False\n if button9Pressed == True:\n button9.colour = 255, 255, 255\n button9Pressed = False\n button7.colour = 0, 255, 0\n print('Clicked $7')\n button7Pressed = True\n continue\n else:\n button7.colour = 255, 255, 255\n button7Pressed = False\n if button9.isOver(pos):\n if button9Pressed == False:\n if button3Pressed == True:\n button3.colour = 255, 255, 255\n button3Pressed = False\n if button5Pressed == True:\n button5.colour = 255, 255, 255\n button5Pressed = False\n if button7Pressed == True:\n button7.colour = 255, 255, 255\n button7Pressed = False\n button9.colour = 0, 255, 0\n print('Clicked $10')\n button9Pressed = True\n continue\n else:\n button9.colour = 255, 255, 255\n button9Pressed = False\n if mapCoor == True and event.type == pygame.MOUSEBUTTONDOWN:\n mouseclick = mouseClick(screen)\n if mouseclick[0]:\n pygame.display.update()\n x = mouseclick[1]\n y = mouseclick[2]\n print(x, ',', y)\n mapCoor = False\n sleep(1)\n customisationMenu = True\n if mapCoor2 == True and event.type == pygame.MOUSEBUTTONDOWN:\n mouseclick = mouseClick(screen)\n if mouseclick[0]:\n pygame.display.update()\n x = mouseclick[1]\n y = mouseclick[2]\n print(x, ',', y)\n mapCoor2 = False\n sleep(1)\n loading(screen)\n easySearch = True\n if nextButtonPressed:\n sleep(1)\n loading(screen)\n user_prefList = []\n user_cuisineList = []\n 
user_budget = 0\n if halalButtonPressed:\n user_prefList.append('Halal')\n if vegButtonPressed:\n user_prefList.append('Vegetarian')\n if nonhalalButtonPressed:\n user_prefList.append('Non-Halal/Non-Vegetarian')\n if koreanButtonPressed:\n user_cuisineList.append('Korean')\n if malayButtonPressed:\n user_cuisineList.append('Malay')\n if japanButtonPressed:\n user_cuisineList.append('Japanese')\n if chineseButtonPressed:\n user_cuisineList.append('Chinese')\n if indianButtonPressed:\n user_cuisineList.append('Indian')\n if westernButtonPressed:\n user_cuisineList.append('Western')\n if button3Pressed:\n user_budget = 3\n if button5Pressed:\n user_budget = 5\n if button7Pressed:\n user_budget = 7\n if button9Pressed:\n user_budget = 9\n print(user_cuisineList)\n print(user_prefList)\n print(user_budget)\n finalID = final_list(user_budget, user_cuisineList, user_prefList)\n print(finalID)\n nearest_canteen = nearest_can(finalID, x, y)\n print(nearest_canteen)\n sleep(1)\n nextButtonPressed = False\n complicatedMenu = True\n",
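nearest_can in the steps above selects the three closest canteens by rescanning for the minimum and popping it. Note that if the filtered list ever held fewer than three entries, copy_list[0] would raise an IndexError once the list ran dry; final_list only falls back to the full canteen list when nothing matched at all, so a one-match selection (for example, Thai cuisine alone, which only Pioneer Food Court offers) would hit this path. A compact equivalent with heapq.nsmallest, assuming the same canteen_list layout (coordinates at index 5) and the calc_dis helper; nsmallest returns whatever is available when the list is short, and ties keep their input order where the loop above prefers the last element scanned:

import heapq

def nearest_can(new_list, x, y):
    # Three canteen names closest to (x, y), nearest first.
    return heapq.nsmallest(
        3, new_list,
        key=lambda name: calc_dis(x, y, *canteen_list[name][5]),
    )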
"step-5": "\r\n\r\nimport pygame\r\nimport os\r\nfrom time import sleep\r\n\r\nscreen = pygame.display.set_mode((900,700))\r\nscreen.fill((255,255,255))\r\npygame.display.set_caption(\"NTUFOODIERECOMMENDSYSTEM\")\r\n\r\n'''\r\n###########################\r\n──╔╗────╔╗\r\n──║║───╔╝╚╗\r\n╔═╝╠╦══╬╗╔╬╦══╦═╗╔══╦═╦╗─╔╗\r\n║╔╗╠╣╔═╝║║╠╣╔╗║╔╗╣╔╗║╔╣║─║║\r\n║╚╝║║╚═╗║╚╣║╚╝║║║║╔╗║║║╚═╝║\r\n╚══╩╩══╝╚═╩╩══╩╝╚╩╝╚╩╝╚═╗╔╝\r\n──────────────────────╔═╝║\r\n──────────────────────╚══╝\r\n###########################\r\n● Database is stored on site.\r\n● Updating is relatively simple.\r\n● Programme runs on the basis of pygame, it's hard to update it without text input.\r\n● However, it can easily be done so on shell/console accordingly. \r\n'''\r\n# Food court lists is sorted by [Highest Cost, Lowest Cost, Cuisines Available, Closing Time, Food Preferences Available, Coordinates on NTU Map] ; THE items have keys and corresponding values expressed as a pair, key: value\r\n# where the keys would be that of the canteen names and this would be associated with that of the corresponding properties tht is alloted to it. \r\ncanteen_list = {\r\n \"Food Court 1\": [12, 3.5, [\"Korean\", \"Japanese\", \"Western\"], 2100, [\"Halal\", \"Non-Halal/Non-Vegetarian\"], (442, 473)],\r\n \"Food Court 2\": [10, 3.6, [\"Korean\", \"Chinese\", \"Malay\", ], 2100, [\"Halal\", \"Vegetarian\", \"Non-Halal/Non-Vegetarian\"], (477, 409)],\r\n \"Food Court 4\": [10, 3, [\"Chinese\", \"Western\"], 2100, [\"Non-Halal/Non-Vegetarian\"], (358,526)],\r\n \"Food Court 9\": [10, 3.5, [\"Chinese\"], 2100, [\"Halal\", \"Vegetarian\", \"Non-Halal/Non-Vegetarian\"], (582, 288)],\r\n \"Food Court 11\": [10, 2.5, [\"Chinese\", \"Indian\", \"Japanese\", \"Western\"], 2100, [\"Halal\", \"Vegetarian\", \"Non-Halal/Non-Vegetarian\"], (682, 243)],\r\n \"Food Court 13\": [9, 2, [\"Western\", \"Korean\", \"Japanese\", \"Chinese\"], 2100, [\"Halal\", \"Vegetarian\", \"Non-Halal/Non-Vegetarian\"], (445, 176)],\r\n \"Food Court 14\": [8, 3, [\"Western\", \"Chinese\", \"Korean\", \"Malay\"], 2100, [\"Halal\", \"Vegetarian\", \"Non-Halal/Non-Vegetarian\"], (509, 182)],\r\n \"Food Court 16\": [10, 3.3, [\"Japanese\", \"Chinese\", \"Korean\", \"Indian\"], 2100, [\"Halal\", \"Vegetarian\", \"Non-Halal/Non-Vegetarian\"], (405, 221)],\r\n \"Tamarind Food Court\": [10, 3, [\"Malay\", \"Chinese\", \"Korean\", \"Western\"], 2100, [\"Halal\", \"Non-Halal\", \"Vegetarian\",\"Non-Halal/Non-Vegetarian\"], (627, 200)],\r\n \"Pioneer Food Court\": [20, 2.3, [\"Thai\", \"Chinese\"], 0000, [\"Vegetarian\", \"Non-Halal/Non-Vegetarian\"], (497, 561)],\r\n \"North Spine Food Court\": [10, 2.5, [\"Korean\", \"Japanese\", \"Chinese\", \"Western\", \"Malay\"], 2100, [\"Vegetarian\", \"Non-Halal/Non-Vegetarian\"], (275, 293)],\r\n \"North Spine Plaza\": [10, 4, [\"Western\", \"Korean\"], 2130, [\"Vegetarian\", \"Halal\", \"Non-Halal/Non-Vegetarian\"], (287, 339)],\r\n \"South Spine Food Court\": [10, 2, [\"Chinese\", \"Malay\", \"Korean\", \"Japanese\", \"Western\"], 2100, [\"Vegetarian\", \"Halal\", \"Non-Halal/Non-Vegetarian\"], (227, 496)],\r\n \"Quad Cafe\": [10, 2.4, [\"Korean\", \"Chinese\", \"Indian\", \"Malay\"], 2100, [\"Vegetarian\", \"Halal\", \"Non-Halal/Non-Vegetarian\"], (224, 351)],\r\n \"Coffee Bean\": [20, 4, [\"Western\"], 2000, [\"Vegetarian\", \"Halal\", \"Non-Halal/Non-Vegetarian\"], (219, 389)],\r\n \"North Hill Food Court\": [10, 3.8, [\"Chinese\", \"Malay\", \"Indian\"], 2100, [\"Vegetarian\", \"Halal\", \"Non-Halal/Non-Vegetarian\"], (720,314)]\r\n 
}\r\n\r\n'''\r\n###########################################\r\n───╔╗───────────╔═╗─────╔╗─────╔╗─╔╗\r\n───║║───────────║╔╝─────║║────╔╝╚╦╝╚╗\r\n╔══╣║╔══╦══╦══╗╔╝╚╦══╦═╗║╚═╦╗╔╬╗╔╩╗╔╬══╦═╗\r\n║╔═╣║║╔╗║══╣══╣╚╗╔╣╔╗║╔╝║╔╗║║║║║║─║║║╔╗║╔╗╗\r\n║╚═╣╚╣╔╗╠══╠══║─║║║╚╝║║─║╚╝║╚╝║║╚╗║╚╣╚╝║║║║\r\n╚══╩═╩╝╚╩══╩══╝─╚╝╚══╩╝─╚══╩══╝╚═╝╚═╩══╩╝╚╝\r\n###########################################\r\n● We had help from online tutorials to workout the UI buttons functionality. \r\n● A bit of corresponding tweaks incorporating into project from the tutorial that I learnt from\r\n● ref: https://www.youtube.com/watch?v=4_9twnEduFA\r\n'''\r\nclass button():\r\n def __init__(self, colour, x, y, width, height, text=''):\r\n self.colour = colour\r\n self.x = x\r\n self.y = y\r\n self.width = width\r\n self.height = height\r\n self.text = text\r\n\r\n def draw(self,win,outline = None):\r\n if outline:\r\n #draw a bigger rectangle behind to create a border\r\n pygame.draw.rect(win, outline, (self.x-2, self.y-2, self.width+4, self.height+4),0)\r\n #draws the button rectangle\r\n pygame.draw.rect(win, self.colour, (self.x, self.y, self.width, self.height),0)\r\n\r\n if self.text != '':\r\n font = pygame.font.SysFont('calligrapher.ttf', 60)\r\n text = font.render(self.text, 1, (0,0,0))\r\n win.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))\r\n\r\n def isOver(self, pos):\r\n #pos is the mouse position (x,y) coordinates\r\n if pos[0] > self.x and pos[0] < self.x + self.width:\r\n if pos[1] > self.y and pos[1] < self.y + self.height:\r\n return True\r\n else: \r\n return False\r\n\r\n'''\r\n##################################\r\n─╔═╗─────────╔╗\r\n─║╔╝────────╔╝╚╗\r\n╔╝╚╦╗╔╦═╗╔══╬╗╔╬╦══╦═╗╔══╗\r\n╚╗╔╣║║║╔╗╣╔═╝║║╠╣╔╗║╔╗╣══╣\r\n─║║║╚╝║║║║╚═╗║╚╣║╚╝║║║╠══║\r\n─╚╝╚══╩╝╚╩══╝╚═╩╩══╩╝╚╩══╝\r\n##################################\r\n╔═╗────────╔╗\r\n║═╬═╦╦╗╔═╦╦╬╣\r\n║╔╣╬║╔╝║╬║║║║\r\n╚╝╚═╩╝─╠╗╠═╩╝\r\n───────╚═╝\r\n#################\r\n● Most of the functions here help to draw out the different states of the screen, that the screen could be in\r\n● The redraw functions help to update the display based on it's respective transitory states\r\n'''\r\n#3 functions here controls the Surface Text appearancese\r\ndef text(text,win,x,y):\r\n font = pygame.font.SysFont('freesansbold.ttf', 50)\r\n phrase = font.render(text, 1, (0,0,0))\r\n win.blit(phrase, (x,y))\r\n\r\ndef instructionText(text,win,x,y):\r\n font = pygame.font.SysFont('Arial', 20)\r\n phrase = font.render(text, 1, (0,0,0))\r\n win.blit(phrase, (x,y))\r\n\r\ndef header(text,win,x,y):\r\n font = pygame.font.SysFont('TimesNewRoman', 70)\r\n phrase = font.render(text, 1, (0,0,0))\r\n win.blit(phrase, (x,y))\r\n\r\ndef mouseClick(screen):\r\n #checks for mouseclick event, and fetches corresp. 
positions \r\n x,y = pygame.mouse.get_pos()\r\n \r\n if (x >= 65 and x <=727) and (y >=82 and y <= 618):\r\n #print(event.button)\r\n pygame.draw.circle(screen, (255,0,150), (x,y), 15)\r\n return True, x, y\r\n else:\r\n print(\"Out of bounds!\")\r\n return False, x, y\r\n\r\ndef skeleExit(win):\r\n #exit event\r\n aryadelight = pygame.image.load(os.path.join(\"NTUFoodieRecsv1.png\"))\r\n win.blit(aryadelight,(0,0))\r\n pygame.display.update()\r\n xaxis = 100\r\n for i in range(1,42):\r\n image = str(i) + \".png\"\r\n skele = pygame.image.load(os.path.join(image))\r\n win.blit(skele, (250,200))\r\n text(\"Exiting...\", win, (xaxis+20), 600)\r\n pygame.display.update()\r\n sleep(0.09)\r\n\r\ndef loading(win):\r\n #loading screen, slep interval defined as 0.3 seconds to load subs. frame \r\n x = 0\r\n while x < 3:\r\n load0 = pygame.image.load(os.path.join(\"load0.png\"))\r\n win.blit(load0, (0,0))\r\n pygame.display.update()\r\n sleep(0.3)\r\n load1 = pygame.image.load(os.path.join(\"load1.png\"))\r\n win.blit(load1, (0,0))\r\n pygame.display.update()\r\n sleep(0.3)\r\n load2 = pygame.image.load(os.path.join(\"load2.png\"))\r\n win.blit(load2, (0,0))\r\n pygame.display.update()\r\n sleep(0.3)\r\n load3 = pygame.image.load(os.path.join(\"load3.png\"))\r\n win.blit(load3, (0,0))\r\n pygame.display.update()\r\n sleep(0.3)\r\n x += 1\r\n# ---------------------------------------------------------------------------# \r\ndef redrawMap(screen):\r\n #draws the embedded NTU map image provided \r\n NTUmap = pygame.image.load(os.path.join(\"NTUMap.jpg\"))\r\n screen.blit(NTUmap, (0,0))\r\n for x in range(50,900,50):\r\n #y axial grids\r\n pygame.draw.rect(screen, (255,0,0), (x, 0, 1, 700), 0)\r\n for y in range(50,700,50):\r\n #x axial grids\r\n pygame.draw.rect(screen, (255,0,0), (0, y, 900, 1), 0)\r\n text('Please click on your current location!',screen,200,100)\r\n\r\ndef redrawGPSMap(screen, top3, x, y):\r\n #redraw NTU map, but this time with corresponding location coordinates\r\n NTUmap = pygame.image.load(os.path.join(\"NTUMap.jpg\"))\r\n screen.blit(NTUmap, (0,0))\r\n redGPS = pygame.image.load(os.path.join(\"redgps.png\"))\r\n screen.blit(redGPS, (x-16,y-32))\r\n instructionText(\"You are currently at this position.\", screen, x+4, y-10)\r\n counter = 1\r\n for i in top3:\r\n coor = canteen_list[i][5]\r\n if counter == 1:\r\n blueGPS = pygame.image.load(os.path.join(\"bluegps.png\"))\r\n screen.blit(blueGPS, (coor[0]-12,coor[1]-24))\r\n instructionText(i, screen, coor[0]-24, coor[1])\r\n pass\r\n if counter == 2:\r\n blackGPS = pygame.image.load(os.path.join(\"blackgps.png\"))\r\n screen.blit(blackGPS, (coor[0]-12,coor[1]-24))\r\n instructionText(i, screen, coor[0]-24, coor[1])\r\n pass\r\n if counter == 3:\r\n yellowGPS = pygame.image.load(os.path.join(\"yellowgps.png\"))\r\n screen.blit(yellowGPS, (coor[0]-12,coor[1]-24))\r\n instructionText(i, screen, coor[0]-24, coor[1])\r\n pass\r\n counter += 1\r\n restartButton.draw(screen, (0,0,0))\r\n\r\ndef redrawMainWin(screen):\r\n #functionality that controls what is displayed on the main window\r\n aryadelight = pygame.image.load(os.path.join(\"NTUFoodieRecsv1.png\"))\r\n screen.blit(aryadelight,(0,0))\r\n mapButton.draw(screen, (0,0,0))\r\n instructionText(\"(Choose your cuisines, preferences and budget for the meal here!)\",screen,215,320)\r\n predictButton.draw(screen, (0,0,0))\r\n instructionText(\"(Find the nearest canteen!)\",screen,132,470)\r\n exitButton.draw(screen, (0,0,0))\r\n ice = pygame.image.load(os.path.join(\"ice.png\"))\r\n 
screen.blit(ice, (500,670))\r\n font = pygame.font.SysFont('verdana', 20)\r\n creator = font.render(\"Made by HweeHean X Arya\", 1, (0,0,200))\r\n screen.blit(creator, (535,670))\r\n\r\ndef redrawCustWin(screen):\r\n #controls what is displayed on the customisation window\r\n bp = pygame.image.load(os.path.join(\"gradient.jpg\"))\r\n screen.blit(bp,(0,0))\r\n instructionText('Left click again to reset!',screen,300,20)\r\n text('Please select your food preference: ', screen, 100, 50)\r\n halalButton.draw(screen, (0,0,0))\r\n vegButton.draw(screen, (0,0,0))\r\n nonhalalButton.draw(screen, (0,0,0))\r\n text('Please select your cuisine type: ', screen, 100, 200)\r\n koreanButton.draw(screen, (0,0,0))\r\n malayButton.draw(screen, (0,0,0))\r\n japanButton.draw(screen, (0,0,0))\r\n chineseButton.draw(screen, (0,0,0))\r\n indianButton.draw(screen, (0,0,0))\r\n westernButton.draw(screen, (0,0,0))\r\n text('Please select your maximum budget: ', screen, 100, 430)\r\n button3.draw(screen, (0,0,0))\r\n button5.draw(screen, (0,0,0))\r\n button7.draw(screen, (0,0,0))\r\n button9.draw(screen, (0,0,0))\r\n nextButton.draw(screen, (0,0,0))\r\n\r\ndef redrawSearchWin(screen,x,y):\r\n #gives the top 3 most relevant results for the prediction tab\r\n bp = pygame.image.load(os.path.join(\"NTUFoodieRecsv1.png\"))\r\n screen.blit(bp,(0,0))\r\n GordonRamsay = pygame.image.load(os.path.join(\"GordonRamsay.png\"))\r\n screen.blit(GordonRamsay, (400,100))\r\n distList = []\r\n for i in canteen_list:\r\n distList.append(i)\r\n print(distList)\r\n top3 = nearest_can(distList, x, y)\r\n print(top3)\r\n text(\"Nearest Canteen:\",screen,110,400)\r\n yaxis = 490\r\n canteenCount = 1\r\n for k in top3:\r\n if canteenCount == 1:\r\n if k == \"Food Court 1\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen1.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 2\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen2.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 4\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen4.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 9\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen9.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 11\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen11.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 13\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen13.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 14\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen14.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 16\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen16.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Tamarind Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"Tamarind.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Pioneer Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"Pioneer.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"North Spine Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"NorthSpine.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"North Spine Plaza\":\r\n canteenPic = pygame.image.load(os.path.join(\"NorthSpinePlaza.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"South Spine Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"SouthSpineKoufuFoodCourt.png\"))\r\n screen.blit(canteenPic, 
(150,200))\r\n if k == \"Quad Cafe\":\r\n canteenPic = pygame.image.load(os.path.join(\"Quad.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Coffee Bean\":\r\n canteenPic = pygame.image.load(os.path.join(\"Coffee.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"North Hill Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"NorthHill.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n text(str(canteenCount), screen, 110, yaxis)\r\n text(\".\", screen, 135, yaxis)\r\n text(k,screen,150,yaxis)\r\n canteenCount += 1\r\n yaxis += 70\r\n return top3\r\n\r\ndef complicatedSearchWin(screen,top3):\r\n #displays the top3 results for the end user after clicking customisation\r\n bp = pygame.image.load(os.path.join(\"NTUFoodieRecsv1.png\"))\r\n screen.blit(bp,(0,0))\r\n GordonRamsay = pygame.image.load(os.path.join(\"GordonRamsay.png\"))\r\n screen.blit(GordonRamsay, (400,100))\r\n text(\"Nearest Canteen:\",screen,110,400)\r\n yaxis = 490\r\n canteenCount = 1\r\n for k in top3:\r\n if canteenCount == 1:\r\n if k == \"Food Court 1\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen1.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 2\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen2.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 4\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen4.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 9\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen9.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 11\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen11.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 13\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen13.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 14\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen14.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Food Court 16\":\r\n canteenPic = pygame.image.load(os.path.join(\"Canteen16.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Tamarind Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"Tamarind.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Pioneer Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"Pioneer.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"North Spine Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"NorthSpine.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"North Spine Plaza\":\r\n canteenPic = pygame.image.load(os.path.join(\"NorthSpinePlaza.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"South Spine Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"SouthSpineKoufuFoodCourt.png\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Quad Cafe\":\r\n canteenPic = pygame.image.load(os.path.join(\"Quad.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"Coffee Bean\":\r\n canteenPic = pygame.image.load(os.path.join(\"Coffee.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n if k == \"North Hill Food Court\":\r\n canteenPic = pygame.image.load(os.path.join(\"NorthHill.jpg\"))\r\n screen.blit(canteenPic, (150,200))\r\n text(str(canteenCount), screen, 110, yaxis)\r\n text(\".\", screen, 135, yaxis)\r\n text(k,screen,150,yaxis)\r\n canteenCount += 1\r\n yaxis += 
70\r\n\r\n'''\r\n╔═╗────╔═╗───╔╗╔╗\r\n║═╬═╦╦╗║═╬═╦╦╣╚╬╬═╦╦═╗\r\n║╔╣╬║╔╝╠═║╬║╔╣╔╣║║║║╬║\r\n╚╝╚═╩╝─╚═╩═╩╝╚═╩╩╩═╬╗║\r\n───────────────────╚═╝\r\n###########################\r\n● Functions below control how we do the sorting for the distance\r\n and the different cuisines\r\n'''\r\n#function provided by ARYA\r\n#function to compile a list of all the relevant food courts\r\ndef final_list(user_budget, user_cuisine, user_preference):\r\n new_list = []\r\n\r\n #Creating a list of all food courts that fit in the user's budget\r\n for i in canteen_list:\r\n if user_budget >= canteen_list[i][1]:\r\n new_list.append(i) \r\n \r\n #Creating a list of all food courts according to the imposed constraints on cuisine\r\n for c in user_cuisine:\r\n for i in canteen_list:\r\n if c in canteen_list[i][2]:\r\n new_list.append(i)\r\n\r\n #Adding to the list, all the food courts according to the food preferences specified \r\n for c in user_preference:\r\n for i in canteen_list:\r\n if c in canteen_list[i][4]:\r\n new_list.append(i)\r\n\r\n #eliminating all the repeated options\r\n new_list = list(set(new_list))\r\n\r\n #if new_list is empty due to no selection made\r\n if len(new_list) == 0:\r\n for i in canteen_list:\r\n new_list.append(i)\r\n return(new_list)\r\n\r\n#function to calulate the horizontal distance from you to proposed option\r\ndef calc_dis(x1, y1, x2, y2):\r\n return ((x1-x2)**2 + (y1-y2)**2)**1/2\r\n\r\n#function to find out the nearest suitable food outlet/food court\r\ndef nearest_can(new_list, x, y):\r\n top3 = []\r\n copy_list = new_list.copy()\r\n while len(top3) != 3:\r\n j = copy_list[0]\r\n coor = canteen_list[j][5]\r\n Min = calc_dis(x, y, coor[0], coor[1])\r\n food_court = ''\r\n for k in copy_list:\r\n #coordinates of the food court\r\n coor = canteen_list[k][5]\r\n dist = calc_dis(x, y, coor[0], coor[1])\r\n if Min >= dist:\r\n Min = dist\r\n food_court = k\r\n index = copy_list.index(food_court)\r\n copy_list.pop(index)\r\n top3.append(food_court)\r\n print(top3)\r\n return top3\r\n\r\n'''\r\n#########################\r\n╔╗─────╔╗─╔╗\r\n║║────╔╝╚╦╝╚╗\r\n║╚═╦╗╔╬╗╔╩╗╔╬══╦═╗╔══╗\r\n║╔╗║║║║║║─║║║╔╗║╔╗╣══╣\r\n║╚╝║╚╝║║╚╗║╚╣╚╝║║║╠══║\r\n╚══╩══╝╚═╝╚═╩══╩╝╚╩══╝\r\n#########################\r\n● This is where the buttons are defined. 
Using the class...\r\n● They are relatively self-explanatory\r\n'''\r\n\r\n#buttons for the main loading page:\r\nmapButton = button((255,255,255), 200, 250, 500, 100, 'Canteen Customisation')\r\npredictButton = button((255,255,255), 100, 400, 300, 100, 'Prediction')\r\nexitButton = button((255,255,255), 500, 400, 300, 100, 'Exit')\r\n\r\n#buttons for the custimisation screen:\r\nhalalButton = button((255,255,255), 50, 120, 250, 50, 'Halal')\r\nvegButton = button((255,255,255), 320, 120, 250, 50, 'Vegetarian')\r\nnonhalalButton = button((255,255,255), 590, 120, 250, 50, 'Non-Halal')\r\nkoreanButton = button((255,255,255), 50, 270, 250, 50, 'Korean')\r\nmalayButton = button((255,255,255), 320, 270, 250, 50, 'Malay')\r\njapanButton = button((255,255,255), 590, 270, 250, 50, 'Japanese')\r\nchineseButton = button((255,255,255), 50, 340, 250, 50, 'Chinese')\r\nindianButton = button((255,255,255), 320, 340, 250, 50, 'Indian')\r\nwesternButton = button((255,255,255), 590, 340, 250, 50, 'Western')\r\nbutton3 = button((255,255,255), 235, 490, 70, 50, '$3')\r\nbutton5 = button((255,255,255), 355, 490, 70, 50, '$5')\r\nbutton7 = button((255,255,255), 475, 490, 70, 50, '$7')\r\nbutton9 = button((255,255,255), 595, 490, 70, 50, '$10')\r\nnextButton = button((255,255,255), 730, 580, 120, 70, 'Next')\r\n\r\n#buttons to showcase GPS:\r\ngpsButton = button((255,255,255), 700, 600, 170, 50, 'to Map')\r\nrestartButton = button((255,255,255), 700, 600, 190, 50, 'Restart?')\r\n\r\n'''\r\n#############################\r\n────╔╗────╔╗\r\n───╔╝╚╗──╔╝╚╗\r\n╔══╬╗╔╬══╬╗╔╬══╦══╗\r\n║══╣║║║╔╗║║║║║═╣══╣\r\n╠══║║╚╣╔╗║║╚╣║═╬══║\r\n╚══╝╚═╩╝╚╝╚═╩══╩══╝\r\n#############################\r\n● Since I'm only using one while loop and all the functions are in here,\r\n it is important to note that none of the \"if\" statements interfere with\r\n each other\r\n● Acts like a flip-flop which stores the data of the different STATES\r\n'''\r\n#originalstate of customisation buttons\r\nhalalButtonPressed = False\r\nvegButtonPressed = False\r\nnonhalalButtonPressed = False\r\nkoreanButtonPressed = False\r\nmalayButtonPressed = False\r\njapanButtonPressed = False\r\nchineseButtonPressed = False\r\nindianButtonPressed = False\r\nwesternButtonPressed = False\r\nbutton3Pressed = False\r\nbutton5Pressed = False\r\nbutton7Pressed = False\r\nbutton9Pressed = False\r\nnextButtonPressed = False\r\ngpsButtonPressed = False\r\n\r\n#original state of events\r\ncheckButton = True\r\nmapCoor = False\r\ncustomisationMenu = False\r\nmapCoor2 = False\r\neasySearch = False\r\ncomplicatedMenu = False\r\noneTime = True\r\n\r\n'''\r\n####################################\r\n╔═╗╔═╗───────╔═══╗\r\n║║╚╝║║───────║╔═╗║\r\n║╔╗╔╗╠══╦╦═╗─║╚═╝╠═╦══╦══╦═╦══╦╗╔╗\r\n║║║║║║╔╗╠╣╔╗╗║╔══╣╔╣╔╗║╔╗║╔╣╔╗║╚╝║\r\n║║║║║║╔╗║║║║║║║──║║║╚╝║╚╝║║║╔╗║║║║\r\n╚╝╚╝╚╩╝╚╩╩╝╚╝╚╝──╚╝╚══╩═╗╠╝╚╝╚╩╩╩╝\r\n──────────────────────╔═╝║\r\n──────────────────────╚══╝\r\n####################################\r\n● It involves a lot of existing predefined states, turning on and off to display\r\n multiple things without them interfering with each other's functionality\r\n● I.e. Clicking customisation button will disable itself, hence\r\n if the mouse is clicked over at the same area, it will not\r\n be activated again.\r\n● This is every important to have a smooth flow. 
\r\n● Also left some debugging messages within the console to help\r\n understand what is going on behind the scenes\r\n'''\r\npygame.init()\r\nrun = True\r\nclock = pygame.time.Clock()\r\n#start the pygame programme \r\nwhile run:\r\n #if true, redraws the main window\r\n if checkButton:\r\n redrawMainWin(screen)\r\n #if true, redraws the customisation window\r\n if customisationMenu:\r\n redrawCustWin(screen)\r\n if easySearch:\r\n if oneTime:\r\n nearest_canteen = redrawSearchWin(screen, x, y)\r\n sleep(2)\r\n oneTime = False\r\n gpsButton.draw(screen, (0,0,0))\r\n #if true, redraws the complicated cusomisation results\r\n if complicatedMenu:\r\n if oneTime:\r\n complicatedSearchWin(screen, nearest_canteen)\r\n sleep(2)\r\n oneTime = False\r\n gpsButton.draw(screen, (0,0,0))\r\n #redraws the GPS map, with point locaters indicated\r\n if gpsButtonPressed == True:\r\n redrawGPSMap(screen, nearest_canteen, x, y)\r\n pygame.display.update()\r\n clock.tick(30)\r\n\r\n #checks event\r\n for event in pygame.event.get():\r\n #Fetches the mouse position\r\n pos = pygame.mouse.get_pos()\r\n\r\n #Quits the pygame programme\r\n if event.type == pygame.QUIT:\r\n run = False\r\n pygame.quit()\r\n\r\n if gpsButtonPressed:\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if restartButton.isOver(pos):\r\n restartButton.colour = (50,50,50)\r\n restartButton.draw(screen, (0,0,0))\r\n pygame.display.update()\r\n print('clicked the restart button')\r\n #original state of customisation buttons\r\n halalButtonPressed = False\r\n vegButtonPressed = False\r\n nonhalalButtonPressed = False\r\n koreanButtonPressed = False\r\n malayButtonPressed = False\r\n japanButtonPressed = False\r\n chineseButtonPressed = False\r\n indianButtonPressed = False\r\n westernButtonPressed = False\r\n button3Pressed = False\r\n button5Pressed = False\r\n button7Pressed = False\r\n button9Pressed = False\r\n nextButtonPressed = False\r\n gpsButtonPressed = False\r\n #original state of events\r\n checkButton = True\r\n mapCoor = False\r\n customisationMenu = False\r\n mapCoor2 = False\r\n easySearch = False\r\n complicatedMenu = False\r\n oneTime = True\r\n\r\n if event.type == pygame.MOUSEMOTION:\r\n if restartButton.isOver(pos):\r\n restartButton.colour = (0,255,0)\r\n continue\r\n else:\r\n restartButton.colour = (255,255,255)\r\n continue\r\n\r\n if easySearch == True or complicatedMenu == True:\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if gpsButton.isOver(pos):\r\n gpsButton.colour = (50,50,50)\r\n gpsButton.draw(screen, (0,0,0))\r\n pygame.display.update()\r\n print('clicked gps button')\r\n gpsButtonPressed = True\r\n easySearch = False\r\n complicatedMenu = False\r\n continue\r\n\r\n if event.type == pygame.MOUSEMOTION:\r\n if gpsButton.isOver(pos):\r\n gpsButton.colour = (0,255,0)\r\n continue\r\n else:\r\n gpsButton.colour = (255,255,255)\r\n continue\r\n \r\n #if mouse is clicked over buttons (main page)\r\n if checkButton:\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if mapButton.isOver(pos):\r\n mapButton.colour = (0,255,0)\r\n redrawMainWin(screen)\r\n pygame.display.update()\r\n print('clicked map button')\r\n sleep(0.5)\r\n redrawMap(screen)\r\n checkButton = False\r\n mapCoor = True\r\n continue\r\n \r\n if predictButton.isOver(pos):\r\n predictButton.colour = (0,255,0)\r\n redrawMainWin(screen)\r\n pygame.display.update()\r\n print('clicked predict button')\r\n sleep(0.5)\r\n redrawMap(screen)\r\n checkButton = False\r\n mapCoor2 = True\r\n continue\r\n\r\n if exitButton.isOver(pos):\r\n exitButton.colour 
= (0,255,0)\r\n print('Exiting...')\r\n skeleExit(screen)\r\n pygame.quit()\r\n run = False\r\n exit()\r\n\r\n #if mouse hovered over the button (main page)\r\n if event.type == pygame.MOUSEMOTION:\r\n if mapButton.isOver(pos):\r\n mapButton.colour = (255,0,0)\r\n else:\r\n mapButton.colour = (255,255,255)\r\n\r\n if predictButton.isOver(pos):\r\n predictButton.colour = (255,0,0)\r\n else:\r\n predictButton.colour = (255,255,255)\r\n\r\n if exitButton.isOver(pos):\r\n exitButton.colour = (255,0,0)\r\n else: \r\n exitButton.colour = (255,255,255)\r\n\r\n #clicking buttons in the customisation menu:\r\n if customisationMenu:\r\n if event.type == pygame.MOUSEMOTION:\r\n if nextButton.isOver(pos):\r\n nextButton.colour = (0,0,255)\r\n else:\r\n nextButton.colour = (255,255,255)\r\n continue\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n #clicking on next button\r\n if nextButton.isOver(pos):\r\n nextButton.colour = (255,255,0)\r\n nextButtonPressed = True\r\n customisationMenu = False\r\n continue\r\n\r\n if halalButton.isOver(pos):\r\n if halalButtonPressed == False:\r\n if nonhalalButtonPressed:\r\n nonhalalButton.colour = (255,255,255)\r\n nonhalalButtonPressed = False\r\n halalButton.colour = (0,255,0)\r\n print('clicked Halal button')\r\n halalButtonPressed = True\r\n continue\r\n else:\r\n halalButton.colour = (255,255,255)\r\n halalButtonPressed = False\r\n continue\r\n \r\n if vegButton.isOver(pos):\r\n if vegButtonPressed == False:\r\n if nonhalalButtonPressed:\r\n nonhalalButton.colour = (255,255,255)\r\n nonhalalButtonPressed = False\r\n vegButton.colour = (0,255,0)\r\n print('clicked Vegetarian button')\r\n vegButtonPressed = True\r\n continue\r\n else:\r\n vegButton.colour = (255,255,255)\r\n vegButtonPressed = False\r\n continue\r\n\r\n if nonhalalButton.isOver(pos):\r\n if nonhalalButtonPressed == False:\r\n if halalButtonPressed:\r\n halalButton.colour = (255,255,255)\r\n halalButtonPressed = False\r\n if vegButtonPressed:\r\n vegButton.colour = (255,255,255)\r\n vegButtonPressed = False\r\n nonhalalButton.colour = (0,255,0)\r\n print('clicked non-halal button')\r\n nonhalalButtonPressed = True\r\n continue\r\n else:\r\n nonhalalButton.colour = (255,255,255)\r\n nonhalalButtonPressed = False\r\n\r\n if koreanButton.isOver(pos):\r\n if koreanButtonPressed == False:\r\n koreanButton.colour = (0,255,0)\r\n print('clicked korean button')\r\n koreanButtonPressed = True\r\n continue\r\n else:\r\n koreanButton.colour = (255,255,255)\r\n koreanButtonPressed = False\r\n\r\n if malayButton.isOver(pos):\r\n if malayButtonPressed == False:\r\n malayButton.colour = (0,255,0)\r\n print('clicked Malay button')\r\n malayButtonPressed = True\r\n continue\r\n else:\r\n malayButton.colour = (255,255,255)\r\n malayButtonPressed = False\r\n\r\n if japanButton.isOver(pos):\r\n if japanButtonPressed == False:\r\n japanButton.colour = (0,255,0)\r\n print('clicked japan button')\r\n japanButtonPressed = True\r\n continue\r\n else:\r\n japanButton.colour = (255,255,255)\r\n japanButtonPressed = False\r\n\r\n if chineseButton.isOver(pos):\r\n if chineseButtonPressed == False:\r\n chineseButton.colour = (0,255,0)\r\n print('clicked chinese button')\r\n chineseButtonPressed = True\r\n continue\r\n else:\r\n chineseButton.colour = (255,255,255)\r\n chineseButtonPressed = False\r\n\r\n if indianButton.isOver(pos):\r\n if indianButtonPressed == False:\r\n indianButton.colour = (0,255,0)\r\n print('clicked indian button')\r\n indianButtonPressed = True\r\n continue\r\n else:\r\n indianButton.colour = 
(255,255,255)\r\n indianButtonPressed = False\r\n\r\n if westernButton.isOver(pos):\r\n if westernButtonPressed == False:\r\n westernButton.colour = (0,255,0)\r\n print('clicked western button')\r\n westernButtonPressed = True\r\n continue\r\n else:\r\n westernButton.colour = (255,255,255)\r\n westernButtonPressed = False\r\n \r\n if button3.isOver(pos):\r\n if button3Pressed == False:\r\n if button5Pressed == True:\r\n button5.colour = (255,255,255)\r\n button5Pressed = False\r\n if button7Pressed == True:\r\n button7.colour = (255,255,255)\r\n button7Pressed = False\r\n if button9Pressed == True:\r\n button9.colour = (255,255,255)\r\n button9Pressed = False\r\n button3.colour = (0,255,0)\r\n print('clicked $3')\r\n button3Pressed = True\r\n continue\r\n else:\r\n button3.colour = (255,255,255)\r\n button3Pressed = False\r\n \r\n if button5.isOver(pos):\r\n if button5Pressed == False:\r\n if button3Pressed == True:\r\n button3.colour = (255,255,255)\r\n button3Pressed = False\r\n if button7Pressed == True:\r\n button7.colour = (255,255,255)\r\n button7Pressed = False\r\n if button9Pressed == True:\r\n button9.colour = (255,255,255)\r\n button9Pressed = False\r\n button5.colour = (0,255,0)\r\n print('Clicked $5')\r\n button5Pressed = True\r\n continue\r\n else:\r\n button5.colour = (255,255,255)\r\n button5Pressed = False\r\n\r\n if button7.isOver(pos):\r\n if button7Pressed == False:\r\n if button3Pressed == True:\r\n button3.colour = (255,255,255)\r\n button3Pressed = False\r\n if button5Pressed == True:\r\n button5.colour = (255,255,255)\r\n button5Pressed = False\r\n if button9Pressed == True:\r\n button9.colour = (255,255,255)\r\n button9Pressed = False\r\n button7.colour = (0,255,0)\r\n print('Clicked $7')\r\n button7Pressed = True\r\n continue\r\n else:\r\n button7.colour = (255,255,255)\r\n button7Pressed = False\r\n\r\n if button9.isOver(pos):\r\n if button9Pressed == False:\r\n if button3Pressed == True:\r\n button3.colour = (255,255,255)\r\n button3Pressed = False\r\n if button5Pressed == True:\r\n button5.colour = (255,255,255)\r\n button5Pressed = False\r\n if button7Pressed == True:\r\n button7.colour = (255,255,255)\r\n button7Pressed = False\r\n button9.colour = (0,255,0)\r\n print('Clicked $10')\r\n button9Pressed = True\r\n continue\r\n else:\r\n button9.colour = (255,255,255)\r\n button9Pressed = False \r\n\r\n #if mousebuttondown and map is already displayed\r\n if mapCoor == True and event.type == pygame.MOUSEBUTTONDOWN:\r\n mouseclick = mouseClick(screen)\r\n if mouseclick[0]:\r\n pygame.display.update()\r\n x = mouseclick[1]\r\n y = mouseclick[2]\r\n print(x, ',', y)\r\n #pygame.time.delay(2000) \r\n mapCoor = False\r\n sleep(1)\r\n customisationMenu = True\r\n\r\n #if prediction button is clicked\r\n if mapCoor2 == True and event.type == pygame.MOUSEBUTTONDOWN:\r\n mouseclick = mouseClick(screen)\r\n if mouseclick[0]:\r\n pygame.display.update()\r\n x = mouseclick[1]\r\n y = mouseclick[2]\r\n print(x, ',', y)\r\n #pygame.time.delay(2000) \r\n mapCoor2 = False\r\n sleep(1)\r\n loading(screen)\r\n easySearch = True\r\n\r\n #things that happen after the next button is pressed\r\n if nextButtonPressed:\r\n sleep(1)\r\n loading(screen)\r\n user_prefList = []\r\n user_cuisineList = []\r\n user_budget = 0\r\n if halalButtonPressed:\r\n user_prefList.append(\"Halal\")\r\n if vegButtonPressed:\r\n user_prefList.append(\"Vegetarian\")\r\n if nonhalalButtonPressed:\r\n user_prefList.append(\"Non-Halal/Non-Vegetarian\")\r\n if koreanButtonPressed:\r\n 
user_cuisineList.append(\"Korean\")\r\n if malayButtonPressed:\r\n user_cuisineList.append(\"Malay\")\r\n if japanButtonPressed:\r\n user_cuisineList.append(\"Japanese\")\r\n if chineseButtonPressed:\r\n user_cuisineList.append(\"Chinese\")\r\n if indianButtonPressed:\r\n user_cuisineList.append(\"Indian\")\r\n if westernButtonPressed:\r\n user_cuisineList.append(\"Western\")\r\n if button3Pressed:\r\n user_budget = 3\r\n if button5Pressed:\r\n user_budget = 5\r\n if button7Pressed:\r\n user_budget = 7\r\n if button9Pressed:\r\n user_budget = 9\r\n #debug\r\n print(user_cuisineList)\r\n print(user_prefList)\r\n print(user_budget)\r\n #continue#\r\n finalID = final_list(user_budget, user_cuisineList, user_prefList)\r\n print(finalID)\r\n nearest_canteen = nearest_can(finalID, x, y)\r\n print(nearest_canteen)\r\n sleep(1)\r\n nextButtonPressed = False\r\n complicatedMenu = True\r\n \r\n",
"step-ids": [
11,
12,
15,
22,
23
]
}
|
[
11,
12,
15,
22,
23
] |
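The pygame event loop in the record above drives Button objects through an isOver(pos) hit test, a mutable colour attribute, and draw(screen, outline), but the Button class itself falls outside the excerpt. A minimal sketch of a button with that interface follows; the font choice and padding are assumptions, not taken from the source:

import pygame

class Button:
    # Rectangular clickable button matching the isOver/draw/colour interface used above.
    # Assumes pygame.init() (and therefore pygame.font) has already been called.
    def __init__(self, colour, x, y, width, height, text=''):
        self.colour = colour  # mutated by the event loop for hover/click feedback
        self.x, self.y = x, y
        self.width, self.height = width, height
        self.text = text

    def draw(self, screen, outline=None):
        if outline:
            # draw a slightly larger rect behind the body to act as a border
            pygame.draw.rect(screen, outline,
                             (self.x - 2, self.y - 2, self.width + 4, self.height + 4))
        pygame.draw.rect(screen, self.colour, (self.x, self.y, self.width, self.height))
        if self.text:
            font = pygame.font.SysFont('arial', 24)
            label = font.render(self.text, True, (0, 0, 0))
            screen.blit(label, (self.x + (self.width - label.get_width()) // 2,
                                self.y + (self.height - label.get_height()) // 2))

    def isOver(self, pos):
        # pos is pygame.mouse.get_pos(); True while the cursor is inside the rect
        return self.x < pos[0] < self.x + self.width and self.y < pos[1] < self.y + self.height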
from rest_framework import serializers
from .models import *
__all__ = (
'CatalogCoinListSerializer', 'CatalogCoinSerializer', 'SeriesListSerializer', 'CoinListSerializer',
'CoinSerializer', 'CountriesListSerializer',
)
class CountriesListSerializer(serializers.ModelSerializer):
class Meta:
model = Country
fields = ('name', 'flag',)
class SeriesListSerializer(serializers.ModelSerializer):
class Meta:
model = Serie
fields = ('name',)
class CatalogCoinListSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogCoin
fields = (
'id', 'face_value', 'currency', 'country', 'year', 'theme', 'mint', 'serie', 'collection', 'exchange',
'wishlist',
)
serie = serializers.SlugRelatedField(slug_field='name', read_only=True)
collection = serializers.IntegerField(read_only=True)
exchange = serializers.IntegerField(read_only=True)
wishlist = serializers.IntegerField(read_only=True)
class CatalogCoinSerializer(serializers.ModelSerializer):
class Meta:
model = CatalogCoin
fields = '__all__'
class CoinListSerializer(serializers.ModelSerializer):
class Meta:
model = Coin
fields = ('id', 'catalog_coin', 'owner', 'status',)
catalog_coin = CatalogCoinListSerializer()
class CoinSerializer(serializers.ModelSerializer):
class Meta:
model = Coin
fields = '__all__'
|
normal
|
{
"blob_id": "b77da75b01e96ff89f873f4c5764a62cf68cd576",
"index": 217,
"step-1": "<mask token>\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Serie\n fields = 'name',\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = ('id', 'face_value', 'currency', 'country', 'year',\n 'theme', 'mint', 'serie', 'collection', 'exchange', 'wishlist')\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = 'id', 'catalog_coin', 'owner', 'status'\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-2": "<mask token>\n\n\nclass CountriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = 'name', 'flag'\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Serie\n fields = 'name',\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = ('id', 'face_value', 'currency', 'country', 'year',\n 'theme', 'mint', 'serie', 'collection', 'exchange', 'wishlist')\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = 'id', 'catalog_coin', 'owner', 'status'\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-3": "<mask token>\n__all__ = ('CatalogCoinListSerializer', 'CatalogCoinSerializer',\n 'SeriesListSerializer', 'CoinListSerializer', 'CoinSerializer',\n 'CountriesListSerializer')\n\n\nclass CountriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = 'name', 'flag'\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Serie\n fields = 'name',\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = ('id', 'face_value', 'currency', 'country', 'year',\n 'theme', 'mint', 'serie', 'collection', 'exchange', 'wishlist')\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = 'id', 'catalog_coin', 'owner', 'status'\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-4": "from rest_framework import serializers\nfrom .models import *\n__all__ = ('CatalogCoinListSerializer', 'CatalogCoinSerializer',\n 'SeriesListSerializer', 'CoinListSerializer', 'CoinSerializer',\n 'CountriesListSerializer')\n\n\nclass CountriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Country\n fields = 'name', 'flag'\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Serie\n fields = 'name',\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = ('id', 'face_value', 'currency', 'country', 'year',\n 'theme', 'mint', 'serie', 'collection', 'exchange', 'wishlist')\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = 'id', 'catalog_coin', 'owner', 'status'\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-5": "from rest_framework import serializers\n\nfrom .models import *\n\n__all__ = (\n 'CatalogCoinListSerializer', 'CatalogCoinSerializer', 'SeriesListSerializer', 'CoinListSerializer',\n 'CoinSerializer', 'CountriesListSerializer',\n)\n\n\nclass CountriesListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Country\n fields = ('name', 'flag',)\n\n\nclass SeriesListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Serie\n fields = ('name',)\n\n\nclass CatalogCoinListSerializer(serializers.ModelSerializer):\n class Meta:\n model = CatalogCoin\n fields = (\n 'id', 'face_value', 'currency', 'country', 'year', 'theme', 'mint', 'serie', 'collection', 'exchange',\n 'wishlist',\n )\n\n serie = serializers.SlugRelatedField(slug_field='name', read_only=True)\n collection = serializers.IntegerField(read_only=True)\n exchange = serializers.IntegerField(read_only=True)\n wishlist = serializers.IntegerField(read_only=True)\n\n\nclass CatalogCoinSerializer(serializers.ModelSerializer):\n class Meta:\n model = CatalogCoin\n fields = '__all__'\n\n\nclass CoinListSerializer(serializers.ModelSerializer):\n class Meta:\n model = Coin\n fields = ('id', 'catalog_coin', 'owner', 'status',)\n\n catalog_coin = CatalogCoinListSerializer()\n\n\nclass CoinSerializer(serializers.ModelSerializer):\n class Meta:\n model = Coin\n fields = '__all__'\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
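For illustration, here is how the nested serializers above compose at runtime; the concrete field values in the commented payload are invented:

# Hypothetical shell session against the models in this record:
coin = Coin.objects.select_related('catalog_coin').first()
data = CoinListSerializer(coin).data
# CoinListSerializer renders catalog_coin through CatalogCoinListSerializer,
# so the payload nests one dict inside the other, e.g.:
# {
#     "id": 1,
#     "catalog_coin": {"id": 7, "face_value": 1.0, "currency": "USD", "country": 3,
#                      "year": 2007, "theme": null, "mint": "P", "serie": "Presidents",
#                      "collection": 0, "exchange": 0, "wishlist": 1},
#     "owner": 2,
#     "status": "in_collection"
# }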
import sys
max = sys.maxsize
print(" sys.maxsize -> ", max)
|
normal
|
{
"blob_id": "c1c79e5adc620690e4e386f7f1cd9f781eeec0ce",
"index": 6843,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(' sys.maxsize -> ', max)\n",
"step-3": "<mask token>\nmax = sys.maxsize\nprint(' sys.maxsize -> ', max)\n",
"step-4": "import sys\nmax = sys.maxsize\nprint(' sys.maxsize -> ', max)\n",
"step-5": "import sys\n\nmax = sys.maxsize\nprint(\" sys.maxsize -> \", max)\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
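Worth noting: sys.maxsize is the largest value a Py_ssize_t can hold (the cap on container lengths and indices), not a ceiling on int values, since Python 3 integers are arbitrary precision. A quick check:

import sys

n = sys.maxsize
print(n + 1 > n)             # True: ints silently grow past the platform word size
print((n + 1).bit_length())  # 64 on a typical 64-bit build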
# class User:
# def __init__(self, name_parameter, email_parameter):
# self.nameofPerson = name_parameter
# self.emailofPerson = email_parameter
# self.account_balance = 0
# def depositMoney(self, amount):
# self.account_balance += amount
# return self
# def transferMoney(self, otherUser, amount):
# self.account_balance -= amount
# otherUser.account_balance += amount
# return self
# To allow user1, user2 or user3 to overdraw their account
# def withdrawMoney_overdraw(self, amount):
# self.account_balance -= amount
# To prevent user1, user2, user3 from overdrawing their account
# def withdrawMoney_no_overdraw(self, amount):
# if self.account_balance > amount:
# self.account_balance -= amount
# else:
# print("insufficient funds")
# user1 = User("Ben", "[email protected]")
# user2 = User("Tom", "[email protected]")
# user3 = User("Sarah", "[email protected]")
# print(user1.nameofPerson)
# prints the name of the user1
# print(user2.emailofPerson)
# prints the email of the user2
# print(user1.account_balance)
# prints the account balance of user1, which is 0 by default according to the class User
# user1.depositMoney(50)
# print(user1.account_balance)
# depositMoney(50) adds 50 to user1's default balance of 0, so printing the balance outputs 50 ($50)
# user1.transferMoney(user2, 5)
# print(user2.account_balance)
# print(user1.account_balance)
# transferMoney(user2, 5) moves 5 from user1 to user2: user2 goes from its default 0 to 5 ($5)
# user1's balance correspondingly drops from 50 to 45 ($45)
# user1.depositMoney(50).depositMoney(30).transferMoney(user2, 5)
# print(user1.account_balance)
# prints user1's balance assuming a fresh start of 0: chaining depositMoney(50).depositMoney(30) brings it to 80 ($80). Each method ends with "return self", so every call hands back the same User object and further calls can be chained onto it
# the trailing transferMoney(user2, 5) then leaves user1 at 80 - 5 = 75 ($75); without "return self" the chain would break, because depositMoney would return None and the next call would fail
# user1.withdrawMoney_overdraw(100)
# print(user1.account_balance)
# prints user1's balance after withdrawMoney_overdraw(100): 75 - 100 = -25
# the new user1 account balance is -25 (-$25)
# The above assuming user1 is allowed to overdraw their account
# user1.withdrawMoney_no_overdraw(100)
# print(user1.account_balance)
# prints "insufficient funds" for user1 since user1 current account balance which is currently 75 and then wants to withdraw 100 which means (75 - 100) but is not allowed to because user1 still needs an additional 25 to fulfil the withdrawMoney function of 100. we give a conditional statement in our def withdrawMoney_no_overdraw above saying if user1 account balance is greater than amount allow user1 to withdraw money if not do not allow user1 to redraw money instead give "insuffiecient funds" (if 75 is greater than 100 which in this case is false go to the else statement which is "insufficient funds")
# The above assuming user1 is not allowed to overdraw their account if account balance for user1 is not greater than the withdraw amount and then user1 will get a message "insufficient funds"
|
normal
|
{
"blob_id": "c69dcffc06146af610a7976e522b6e35cabde1aa",
"index": 3050,
"step-1": "# class User:\n# def __init__(self, name_parameter, email_parameter):\n# self.nameofPerson = name_parameter\n# self.emailofPerson = email_parameter\n# self.account_balance = 0\n\n# def depositMoney(self, amount);\n# self.account_balance += amount\n# return self\n\n# def transferMoney(self, otherUser, amount):\n# self.account_balance -= 5\n# otherUser.account_balance += 5\n# return self\n\n# To allow user1, user2 or user3 overdraw their account\n # def withdrawMoney_overdraw(self, amount):\n # self.account_balance -= amount\n\n# To not allow user1, user2, user3 overdraw their account\n# def withdrawMoney_no_overdraw(self, amount):\n# if self.account_balance > amount:\n# self.account_balance -= amount\n# else:\n# print(\"insufficient funds\")\n\n\n# user1 = User(\"Ben\", \"[email protected]\")\n# user2 = User(\"Tom\", \"[email protected]\")\n# user3 = User(\"Sarah\", \"[email protected]\")\n\n# print(user1.nameofPerson)\n# prints the name of the user1\n\n# print(user2.emailfPerson)\n# prints the email of the user2\n\n# print(user1.account_balance)\n# prints the account balance of the user3 which in this case is 0 by default according to the class User\n\n# user1.depositMoney(50)\n# print(user1.account_balance)\n# prints the account balance of user1 which by default is 0 and then adds the function depositMoney which is giving an arguemnt of 50 (0 + 50 / account_balance + depositMoney) The output is ($50)\n\n# user1.transferMoney(user 2, 5)\n# print(user2.account_balance)\n# print(user1.account_balance)\n# prints user1 account balance which is now 50 then transfers money to user2 (50 - 5) which is now 5 to be added to the default account balance of 0 (0 + 5 / account_balance + transferMoney from user1)\n# Also user1 account_balance (50 - 5) which is now 45 ($45)\n\n# user1.depositMoney(50).depositMoney(30).transferMoney(user2, 5)\n# print(user1.account_balance)\n# prints user1 account banlace (50 + 30) which is 80 ($80), assuming user1 depositedMoney twice. we use \"return self\" at the end of the declared functions to add a \"chain method\" of repeating a chain of function or various types of functions, i.e repeating a particular function for user1 as many times as possible or even adding other functions to modify the final account balance for user 1)\n# The final output for account balance user1 will be (80 - 50) which is 75 ($75) because we transfered money 5 (80 - 5) to user2 at the end in the \"chain mathod of functions\" for user1. This will only work since we added \"return self\"and this means (updating all the chain methods to the very last function of command in the declared function which in this case we tranfered 5 from user1 to user2)\n\n# user1.withdrawMoney_overdraw(100)\n# print(user1.account_balance)\n# prints user1 current account balance which is currently 75 and then withdraws 100 which means (75 - 100) which is -25.\n# the new user1 account balance is ( -25 which is -$25)\n# The above assuming user1 is allowed to overdraw their account\n\n# user1.withdrawMoney_no_overdraw(100)\n# print(user1.account_balance)\n# prints \"insufficient funds\" for user1 since user1 current account balance which is currently 75 and then wants to withdraw 100 which means (75 - 100) but is not allowed to because user1 still needs an additional 25 to fulfil the withdrawMoney function of 100. 
we give a conditional statement in our def withdrawMoney_no_overdraw above saying if user1 account balance is greater than amount allow user1 to withdraw money if not do not allow user1 to redraw money instead give \"insuffiecient funds\" (if 75 is greater than 100 which in this case is false go to the else statement which is \"insufficient funds\")\n# The above assuming user1 is not allowed to overdraw their account if account balance for user1 is not greater than the withdraw amount and then user1 will get a message \"insufficient funds\"\n\n\n\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
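Since the record above is entirely commented out, here is a runnable version of the class its comments describe (transferMoney uses the amount parameter, matching the corrected comments; the > comparison in withdrawMoney_no_overdraw follows the source):

class User:
    def __init__(self, name_parameter, email_parameter):
        self.nameofPerson = name_parameter
        self.emailofPerson = email_parameter
        self.account_balance = 0

    def depositMoney(self, amount):
        self.account_balance += amount
        return self  # returning self is what makes method chaining work

    def transferMoney(self, otherUser, amount):
        self.account_balance -= amount
        otherUser.account_balance += amount
        return self

    def withdrawMoney_no_overdraw(self, amount):
        if self.account_balance > amount:
            self.account_balance -= amount
        else:
            print("insufficient funds")
        return self

user1 = User("Ben", "[email protected]")
user2 = User("Tom", "[email protected]")
user1.depositMoney(50).transferMoney(user2, 5)
print(user1.account_balance)   # 45
print(user2.account_balance)   # 5
user1.withdrawMoney_no_overdraw(100)  # prints "insufficient funds"; balance stays 45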
import pytest
def test_template():
assert True
|
normal
|
{
"blob_id": "e7fa84dbc037253c7f852aa618e6ea88d1fda909",
"index": 1939,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_template():\n assert True\n",
"step-3": "import pytest\n\n\ndef test_template():\n assert True\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
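The record above is only a placeholder that always passes; a slightly more substantive pytest sketch, for illustration (the function under test and its values are invented):

import pytest

@pytest.mark.parametrize("value, expected", [(1, 1), (2, 4), (3, 9)])
def test_square(value, expected):
    # one assertion per parameter tuple; pytest reports each case separately
    assert value ** 2 == expected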
import matplotlib.pyplot as plt
def xyplot(xdata,ydata,title):
fname = "/Users/nalmog/Desktop/swa_equipped_cumulative_"+title+".png"
#plt.figure(figsize=(500,500))
plt.plot(xdata, ydata)
plt.ylabel('some numbers')
# plt.savefig("/Users/nalmog/Desktop/swa_equipped_cumulative_"+title+".png", format='png')
#plt.show()
#plt.savefig("/Users/nalmog/Desktop/swa_equipped_cumulative_"+title+".png", format='png')
plt.title(title)
plt.xlabel("Percent of Fleet")
plt.ylabel("Number of Passes")
plt.savefig(fname)
    plt.clf()
|
normal
|
{
"blob_id": "10a7c1827abb8a87f5965453aa2d8f5e8b4914e5",
"index": 6563,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef xyplot(xdata, ydata, title):\n fname = '/Users/nalmog/Desktop/swa_equipped_cumulative_' + title + '.png'\n plt.plot(xdata, ydata)\n plt.ylabel('some numbers')\n plt.title(title)\n plt.xlabel('Percent of Fleet')\n plt.ylabel('Number of Passes')\n plt.savefig(fname)\n plt.clf()\n",
"step-3": "import matplotlib.pyplot as plt\n\n\ndef xyplot(xdata, ydata, title):\n fname = '/Users/nalmog/Desktop/swa_equipped_cumulative_' + title + '.png'\n plt.plot(xdata, ydata)\n plt.ylabel('some numbers')\n plt.title(title)\n plt.xlabel('Percent of Fleet')\n plt.ylabel('Number of Passes')\n plt.savefig(fname)\n plt.clf()\n",
"step-4": "import matplotlib.pyplot as plt\n\ndef xyplot(xdata,ydata,title):\n fname = \"/Users/nalmog/Desktop/swa_equipped_cumulative_\"+title+\".png\"\n #plt.figure(figsize=(500,500))\n plt.plot(xdata, ydata)\n plt.ylabel('some numbers') \n# plt.savefig(\"/Users/nalmog/Desktop/swa_equipped_cumulative_\"+title+\".png\", format='png')\n #plt.show()\n #plt.savefig(\"/Users/nalmog/Desktop/swa_equipped_cumulative_\"+title+\".png\", format='png')\n plt.title(title) \n plt.xlabel(\"Percent of Fleet\")\n plt.ylabel(\"Number of Passes\")\n plt.savefig(fname)\n plt.clf();\n #plt.\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
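A hedged usage sketch for xyplot above; the data is synthetic, and the hard-coded Desktop path inside the function must exist (or be edited) for savefig to succeed:

import numpy as np

percent_of_fleet = np.linspace(0, 100, 50)
passes = np.cumsum(np.random.randint(0, 20, size=50))  # fake cumulative pass counts
xyplot(percent_of_fleet, passes, "demo")
# writes .../swa_equipped_cumulative_demo.png, then clears the figure via plt.clf()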
# -*- coding:utf-8 -*-
"""
Author:xufei
Date:2021/1/21
"""
|
normal
|
{
"blob_id": "d39e3a552a7c558d3f5b410e0b228fb7409d732a",
"index": 928,
"step-1": "<mask token>\n",
"step-2": "# -*- coding:utf-8 -*-\n\"\"\"\nAuthor:xufei\nDate:2021/1/21\n\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""
This module takes care of starting the API Server, Loading the DB and Adding the endpoints
"""
import os
from flask import Flask, request, jsonify, url_for
from flask_migrate import Migrate
from flask_swagger import swagger
from flask_cors import CORS
from flask_jwt_extended import (
JWTManager, jwt_required, create_access_token, create_refresh_token,
    get_jwt_identity
)
from utils import APIException, generate_sitemap
from models import db
from models import User
from passlib.hash import pbkdf2_sha256 as sha256
app = Flask(__name__)
app.url_map.strict_slashes = False
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DB_CONNECTION_STRING')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['JWT_SECRET_KEY'] = os.environ.get('SECRET_KEY')
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = 3600
MIGRATE = Migrate(app, db)
db.init_app(app)
CORS(app)
jwt = JWTManager(app)
# Handle/serialize errors like a JSON object
@app.errorhandler(APIException)
def handle_invalid_usage(error):
return jsonify(error.to_dict()), error.status_code
# generate sitemap with all your endpoints
@app.route('/')
def sitemap():
return generate_sitemap(app)
@app.route('/hello', methods=['POST', 'GET'])
@jwt_required
def handle_hello():
current_user = get_jwt_identity()
response_body = {
"hello": current_user
}
return jsonify(response_body), 200
@app.route('/login', methods=['POST'])
def handle_login():
data = request.json
user = User.query.filter_by(username = data["username"]).first()
if user is None:
return jsonify ({
"error": "el usuario no existe"
}), 404
if sha256.verify(data["password"], user.password):
mivariable = create_access_token(identity=data["username"])
refresh = create_refresh_token(identity=data["username"])
return jsonify ({
"token": mivariable,
"refresh": refresh
}), 200
return jsonify ({
"error":"la contraseña no es valida"
}), 404
@app.route('/register', methods=['POST'])
def handle_register():
data = request.json
user = User()
user.username = data["username"]
user.mail = data["mail"]
user.password = sha256.hash(data["password"])
db.session.add(user)
db.session.commit()
return jsonify(user.serialize()), 200
# this only runs if `$ python src/main.py` is executed
if __name__ == '__main__':
PORT = int(os.environ.get('PORT', 3000))
app.run(host='0.0.0.0', port=PORT, debug=False)
|
normal
|
{
"blob_id": "36d596c1019dbaaf8dc394633ca464421517dc21",
"index": 3381,
"step-1": "\"\"\"\nThis module takes care of starting the API Server, Loading the DB and Adding the endpoints\n\"\"\"\nimport os\nfrom flask import Flask, request, jsonify, url_for\nfrom flask_migrate import Migrate\nfrom flask_swagger import swagger\nfrom flask_cors import CORS\nfrom flask_jwt_extended import (\n JWTManager, jwt_required, create_access_token, create_refresh_token,\n get_jwt_identity*\n)\nfrom utils import APIException, generate_sitemap\nfrom models import db\nfrom models import User\n\n\nfrom passlib.hash import pbkdf2_sha256 as sha256\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DB_CONNECTION_STRING')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\napp.config['JWT_SECRET_KEY'] = os.environ.get('SECRET_KEY')\napp.config['JWT_ACCESS_TOKEN_EXPIRES'] = 3600\n\nMIGRATE = Migrate(app, db)\ndb.init_app(app)\nCORS(app)\n\njwt = JWTManager(app)*\n\n\n# Handle/serialize errors like a JSON object\[email protected](APIException)\ndef handle_invalid_usage(error):\n return jsonify(error.to_dict()), error.status_code\n\n# generate sitemap with all your endpoints\[email protected]('/')\ndef sitemap():\n return generate_sitemap(app)\n\[email protected]('/hello', methods=['POST', 'GET'])\n@jwt_required\ndef handle_hello():\n current_user = get_jwt_identity()\n response_body = {\n \"hello\": current_user\n }\n\n return jsonify(response_body), 200\n\[email protected]('/login', methods=['POST'])*\ndef handle_login():\n data = request.json\n user = User.query.filter_by(username = data[\"username\"]).first()\n if user is None:\n return jsonify ({\n \"error\": \"el usuario no existe\"\n }), 404\n if sha256.verify(data[\"password\"], user.password):\n\n mivariable = create_access_token(identity=data[\"username\"])\n refresh = create_refresh_token(identity=data[\"username\"])\n return jsonify ({\n \"token\": mivariable,\n \"refresh\": refresh\n }), 200\n\n return jsonify ({\n \"error\":\"la contraseña no es valida\"\n }), 404\n\[email protected]('/register', methods=['POST'])*\ndef handle_register():\n data = request.json\n\n user = User()\n user.username = data[\"username\"]\n user.mail = data[\"mail\"]\n user.password = sha256.hash(data[\"password\"])\n\n db.session.add(user)\n db.session.commit()\n\n return jsonify(user.serialize()), 200\n\n# this only runs if `$ python src/main.py` is executed\nif __name__ == '__main__':\n PORT = int(os.environ.get('PORT', 3000))\n app.run(host='0.0.0.0', port=PORT, debug=False)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
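With the stray * characters removed from the record above, the service exposes /register, /login, and a JWT-protected /hello. A client-side sketch using requests; the host/port follow the app.run call, and the credentials are invented:

import requests

BASE = "http://localhost:3000"

# Register, then log in to obtain the access token
requests.post(BASE + "/register",
              json={"username": "ana", "mail": "ana@example.com", "password": "secret"})
token = requests.post(BASE + "/login",
                      json={"username": "ana", "password": "secret"}).json()["token"]

# flask_jwt_extended expects the token in a Bearer Authorization header by default
r = requests.get(BASE + "/hello", headers={"Authorization": "Bearer " + token})
print(r.json())  # {"hello": "ana"}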
from . import views
from django.conf.urls import url,re_path
enquiryUrlPattern = [
url(r'daily-rate-enquiry', views.daily_rate_enquiry_form),
re_path(r'^contact-us-landing-page/$', views.contact_us_landing_page),
]
|
normal
|
{
"blob_id": "ccf1710cff972eaa06e1ccb5ebedc70d946e3215",
"index": 4906,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nenquiryUrlPattern = [url('daily-rate-enquiry', views.\n daily_rate_enquiry_form), re_path('^contact-us-landing-page/$', views.\n contact_us_landing_page)]\n",
"step-3": "from . import views\nfrom django.conf.urls import url, re_path\nenquiryUrlPattern = [url('daily-rate-enquiry', views.\n daily_rate_enquiry_form), re_path('^contact-us-landing-page/$', views.\n contact_us_landing_page)]\n",
"step-4": "from . import views\nfrom django.conf.urls import url,re_path\n\nenquiryUrlPattern = [\n url(r'daily-rate-enquiry', views.daily_rate_enquiry_form),\n re_path(r'^contact-us-landing-page/$', views.contact_us_landing_page),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.io import loadmat
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import copy
from matplotlib import cm
from matplotlib.animation import FuncAnimation
import scipy.optimize
import networkx as nx
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# task 13
# Load the spamTrain.mat training data from the file.
train_data = loadmat('data/spamTrain.mat')
x = train_data["X"]
y = train_data["y"]
test_data = loadmat('data/spamTest.mat')
x_test = test_data["Xtest"]
y_test = test_data["ytest"]
def vector_to_message(vector):
vocab_file = open("data/vocab.txt", "r")
vocab = vocab_file.readlines()
# one_hot = [int(record.split()[1] in message) for record in vocab]
message_words = []
for vocab_record, vector_enterance in zip(vocab, vector):
is_trigger_word = bool(vector_enterance)
word = vocab_record.split()[1]
if is_trigger_word:
message_words.append(word)
return " ".join(message_words)
message = vector_to_message(x_test[0])
def one_hot_convert(message):
message_words = message.split()
message_words.sort()
vocab_file = open("data/vocab.txt", "r")
vocab = vocab_file.readlines()
# one_hot = [int(record.split()[1] in message) for record in vocab]
one_hot = []
for record in vocab:
word = record.split()[1]
one_hot.append(int(word in message_words))
pass
return np.array([one_hot])
one_hot_convert(message)
|
normal
|
{
"blob_id": "f5820824b5b7e473b79b5dfee2f203684c3755be",
"index": 5154,
"step-1": "<mask token>\n\n\ndef vector_to_message(vector):\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return ' '.join(message_words)\n\n\n<mask token>\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef vector_to_message(vector):\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return ' '.join(message_words)\n\n\n<mask token>\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\n\n\none_hot_convert()\n",
"step-3": "<mask token>\ntrain_data = loadmat('data/spamTrain.mat')\nx = train_data['X']\ny = train_data['y']\ntest_data = loadmat('data/spamTest.mat')\nx_test = test_data['Xtest']\ny_test = test_data['ytest']\n\n\ndef vector_to_message(vector):\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return ' '.join(message_words)\n\n\nmessage = vector_to_message(x_test[0])\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\n\n\none_hot_convert()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom scipy.io import loadmat\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport copy\nfrom matplotlib import cm\nfrom matplotlib.animation import FuncAnimation\nimport scipy.optimize\nimport networkx as nx\nfrom sklearn import svm\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, datasets\ntrain_data = loadmat('data/spamTrain.mat')\nx = train_data['X']\ny = train_data['y']\ntest_data = loadmat('data/spamTest.mat')\nx_test = test_data['Xtest']\ny_test = test_data['ytest']\n\n\ndef vector_to_message(vector):\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return ' '.join(message_words)\n\n\nmessage = vector_to_message(x_test[0])\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open('data/vocab.txt', 'r')\n vocab = vocab_file.readlines()\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\n\n\none_hot_convert()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom scipy.io import loadmat\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport copy\nfrom matplotlib import cm\nfrom matplotlib.animation import FuncAnimation\nimport scipy.optimize\nimport networkx as nx\nfrom sklearn import svm\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, datasets\n\n# task 13\n# Загрузите данные spamTrain.mat из файла.\n\ntrain_data = loadmat('data/spamTrain.mat')\n\nx = train_data[\"X\"]\ny = train_data[\"y\"]\n\ntest_data = loadmat('data/spamTest.mat')\n\nx_test = test_data[\"Xtest\"]\ny_test = test_data[\"ytest\"]\n\n\ndef vector_to_message(vector):\n vocab_file = open(\"data/vocab.txt\", \"r\")\n vocab = vocab_file.readlines()\n # one_hot = [int(record.split()[1] in message) for record in vocab]\n message_words = []\n for vocab_record, vector_enterance in zip(vocab, vector):\n is_trigger_word = bool(vector_enterance)\n word = vocab_record.split()[1]\n if is_trigger_word:\n message_words.append(word)\n return \" \".join(message_words)\n\n\nmessage = vector_to_message(x_test[0])\n\n\ndef one_hot_convert(message):\n message_words = message.split()\n message_words.sort()\n vocab_file = open(\"data/vocab.txt\", \"r\")\n vocab = vocab_file.readlines()\n # one_hot = [int(record.split()[1] in message) for record in vocab]\n one_hot = []\n for record in vocab:\n word = record.split()[1]\n one_hot.append(int(word in message_words))\n pass\n return np.array([one_hot])\none_hot_convert()",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
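The record loads the spam data and builds vocabulary one-hot vectors but never fits the classifier the task calls for. A minimal training/evaluation sketch in the spirit of the exercise; the linear kernel and C=0.1 are conventional choices for this dataset, not taken from the source:

clf = svm.SVC(C=0.1, kernel='linear')
clf.fit(x, y.ravel())  # ravel() flattens the (m, 1) label matrix to shape (m,)

print('train accuracy:', clf.score(x, y.ravel()))
print('test accuracy:', clf.score(x_test, y_test.ravel()))

# Classify a new message by converting it to a vocabulary one-hot vector first
sample = one_hot_convert('free offer click now')
print('spam' if clf.predict(sample)[0] == 1 else 'ham')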
import os
from unittest import TestCase
from pyfibre.gui.file_display_pane import FileDisplayPane
from pyfibre.tests.fixtures import (
directory,
test_image_path)
from pyfibre.tests.probe_classes.parsers import ProbeParser
from pyfibre.tests.probe_classes.readers import ProbeMultiImageReader
source_dir = os.path.dirname(os.path.realpath(__file__))
pyfibre_dir = os.path.dirname(os.path.dirname(source_dir))
class TestFileDisplayPane(TestCase):
def setUp(self):
self.file_display = FileDisplayPane(
supported_readers={'Probe': ProbeMultiImageReader()},
supported_parsers={'Probe': ProbeParser()}
)
self.file_path = test_image_path
def test_add_file(self):
self.file_display.add_files(self.file_path)
self.assertEqual(1, len(self.file_display.file_table))
table_row = self.file_display.file_table[0]
self.assertEqual('/path/to/some/file', table_row.name)
self.assertEqual('Probe', table_row.tag)
self.assertDictEqual(
{'Probe': test_image_path},
table_row.file_set.registry)
self.file_display.add_files(test_image_path)
self.assertEqual(1, len(self.file_display.file_table))
def test_add_directory(self):
self.file_display.add_files(directory)
self.assertEqual(1, len(self.file_display.file_table))
table_row = self.file_display.file_table[0]
self.assertEqual('/path/to/some/file', table_row.name)
self.assertEqual('Probe', table_row.tag)
self.assertDictEqual(
{'Probe': test_image_path},
table_row.file_set.registry)
def test_remove_file(self):
self.file_display.add_files(self.file_path)
self.file_display.remove_file(
[self.file_display.file_table[0]])
self.assertEqual(0, len(self.file_display.file_table))
self.file_display.add_files(self.file_path)
self.assertEqual(1, len(self.file_display.file_table))
def test_filter_files(self):
self.file_display.add_files(self.file_path)
self.file_display.filter_files('some')
self.assertEqual(1, len(self.file_display.file_table))
self.file_display.filter_files('sci-pyfibre')
self.assertEqual(0, len(self.file_display.file_table))
|
normal
|
{
"blob_id": "7c65d0bdd4fd808b3d87706357a651601368e43b",
"index": 8596,
"step-1": "<mask token>\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n self.file_display = FileDisplayPane(supported_readers={'Probe':\n ProbeMultiImageReader()}, supported_parsers={'Probe':\n ProbeParser()})\n self.file_path = test_image_path\n <mask token>\n\n def test_add_directory(self):\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n\n def test_remove_file(self):\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file([self.file_display.file_table[0]])\n self.assertEqual(0, len(self.file_display.file_table))\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n self.assertEqual(1, len(self.file_display.file_table))\n self.file_display.filter_files('sci-pyfibre')\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-2": "<mask token>\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n self.file_display = FileDisplayPane(supported_readers={'Probe':\n ProbeMultiImageReader()}, supported_parsers={'Probe':\n ProbeParser()})\n self.file_path = test_image_path\n\n def test_add_file(self):\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n self.file_display.add_files(test_image_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_add_directory(self):\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n\n def test_remove_file(self):\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file([self.file_display.file_table[0]])\n self.assertEqual(0, len(self.file_display.file_table))\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n self.assertEqual(1, len(self.file_display.file_table))\n self.file_display.filter_files('sci-pyfibre')\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-3": "<mask token>\nsource_dir = os.path.dirname(os.path.realpath(__file__))\npyfibre_dir = os.path.dirname(os.path.dirname(source_dir))\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n self.file_display = FileDisplayPane(supported_readers={'Probe':\n ProbeMultiImageReader()}, supported_parsers={'Probe':\n ProbeParser()})\n self.file_path = test_image_path\n\n def test_add_file(self):\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n self.file_display.add_files(test_image_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_add_directory(self):\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n\n def test_remove_file(self):\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file([self.file_display.file_table[0]])\n self.assertEqual(0, len(self.file_display.file_table))\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n self.assertEqual(1, len(self.file_display.file_table))\n self.file_display.filter_files('sci-pyfibre')\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-4": "import os\nfrom unittest import TestCase\nfrom pyfibre.gui.file_display_pane import FileDisplayPane\nfrom pyfibre.tests.fixtures import directory, test_image_path\nfrom pyfibre.tests.probe_classes.parsers import ProbeParser\nfrom pyfibre.tests.probe_classes.readers import ProbeMultiImageReader\nsource_dir = os.path.dirname(os.path.realpath(__file__))\npyfibre_dir = os.path.dirname(os.path.dirname(source_dir))\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n self.file_display = FileDisplayPane(supported_readers={'Probe':\n ProbeMultiImageReader()}, supported_parsers={'Probe':\n ProbeParser()})\n self.file_path = test_image_path\n\n def test_add_file(self):\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n self.file_display.add_files(test_image_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_add_directory(self):\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual({'Probe': test_image_path}, table_row.file_set\n .registry)\n\n def test_remove_file(self):\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file([self.file_display.file_table[0]])\n self.assertEqual(0, len(self.file_display.file_table))\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n self.assertEqual(1, len(self.file_display.file_table))\n self.file_display.filter_files('sci-pyfibre')\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-5": "import os\nfrom unittest import TestCase\n\nfrom pyfibre.gui.file_display_pane import FileDisplayPane\nfrom pyfibre.tests.fixtures import (\n directory,\n test_image_path)\nfrom pyfibre.tests.probe_classes.parsers import ProbeParser\nfrom pyfibre.tests.probe_classes.readers import ProbeMultiImageReader\n\nsource_dir = os.path.dirname(os.path.realpath(__file__))\npyfibre_dir = os.path.dirname(os.path.dirname(source_dir))\n\n\nclass TestFileDisplayPane(TestCase):\n\n def setUp(self):\n\n self.file_display = FileDisplayPane(\n supported_readers={'Probe': ProbeMultiImageReader()},\n supported_parsers={'Probe': ProbeParser()}\n )\n self.file_path = test_image_path\n\n def test_add_file(self):\n\n self.file_display.add_files(self.file_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual(\n {'Probe': test_image_path},\n table_row.file_set.registry)\n\n self.file_display.add_files(test_image_path)\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_add_directory(self):\n\n self.file_display.add_files(directory)\n self.assertEqual(1, len(self.file_display.file_table))\n\n table_row = self.file_display.file_table[0]\n self.assertEqual('/path/to/some/file', table_row.name)\n self.assertEqual('Probe', table_row.tag)\n self.assertDictEqual(\n {'Probe': test_image_path},\n table_row.file_set.registry)\n\n def test_remove_file(self):\n\n self.file_display.add_files(self.file_path)\n self.file_display.remove_file(\n [self.file_display.file_table[0]])\n\n self.assertEqual(0, len(self.file_display.file_table))\n\n self.file_display.add_files(self.file_path)\n\n self.assertEqual(1, len(self.file_display.file_table))\n\n def test_filter_files(self):\n\n self.file_display.add_files(self.file_path)\n self.file_display.filter_files('some')\n\n self.assertEqual(1, len(self.file_display.file_table))\n\n self.file_display.filter_files('sci-pyfibre')\n\n self.assertEqual(0, len(self.file_display.file_table))\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.http import HttpResponse
from .models import Post
from django.utils import timezone
def list_of_posts(request):
posts = (Post.objects
.filter(published_date__lte=timezone.now())
.order_by('published_date')
)
return render(request, 'blog/list_of_posts.html', {'posts': posts})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request,
'blog/post_detail.html',
{'post': post}
)
|
normal
|
{
"blob_id": "71a0900dc09b1ff55e4e5a4cc7cab617b9c73406",
"index": 4519,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n",
"step-3": "<mask token>\n\n\ndef list_of_posts(request):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by(\n 'published_date')\n return render(request, 'blog/list_of_posts.html', {'posts': posts})\n\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n",
"step-4": "from django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\nfrom .models import Post\nfrom django.utils import timezone\n\n\ndef list_of_posts(request):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by(\n 'published_date')\n return render(request, 'blog/list_of_posts.html', {'posts': posts})\n\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n return render(request, 'blog/post_detail.html', {'post': post})\n",
"step-5": "from django.shortcuts import render, get_object_or_404\n\n# Create your views here.\n\nfrom django.http import HttpResponse\nfrom .models import Post\nfrom django.utils import timezone\n\ndef list_of_posts(request):\n posts = (Post.objects\n .filter(published_date__lte=timezone.now())\n .order_by('published_date')\n )\n return render(request, 'blog/list_of_posts.html', {'posts': posts})\n\ndef post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n\n return render(request,\n 'blog/post_detail.html',\n {'post': post}\n )\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
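To wire the two views above into a project, a minimal urls.py sketch; the route shapes and names are assumptions:

from django.urls import path
from . import views

urlpatterns = [
    path('', views.list_of_posts, name='list_of_posts'),
    path('post/<int:pk>/', views.post_detail, name='post_detail'),
]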
import pandas as pd
import numpy as np
from scipy import misc
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import time
import math
import cv2
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Lambda
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam
from keras.models import load_model
# Data augmentation constants
TRANS_X_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)
TRANS_Y_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)
TRANS_ANGLE = .3 # Maximum angle change when translating in the X direction
OFF_CENTER_IMG = .25 # Angle change when using off center images
DOWNSAMPLE_RATIO = 0.99
Learning_Rate = 0.0001
FOLDER = "examples/"
EPOCHS = 4
TRAINABLE = True
BRIGHTNESS_RANGE = 0.15
IMG_ROWS = 300
IMG_COLS = 300
SHAPE = (IMG_ROWS,IMG_COLS,3)
SAMPLES_TRAIN = 5000
SAMPLES_VALIDATION = 1000
def load_data(data):
temp = []
for i in range(len(data)):
im = cv2.imread(data[i])
im = misc.imresize(im,size=DOWNSAMPLE_RATIO)
im = crop(im)
# im = color_change(im)
temp.append(im)
return temp
def normalize(data):
a=-0.5
b=0.5
greyscale_min=0
greyscale_max=255
return a + ( ( (data - greyscale_min)*(b - a) )/(greyscale_max - greyscale_min))
def color_change(data):
x = cv2.cvtColor(data,cv2.COLOR_BGR2HSV)
return x
def adjust_brightness(im):
temp = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
# Compute a random brightness value and apply to the image
brightness = BRIGHTNESS_RANGE * np.random.uniform(-1,1)
temp[:, :, 2] = temp[:, :, 2] * (1-brightness)
# Convert back to RGB and return
return cv2.cvtColor(temp, cv2.COLOR_HSV2RGB)
def img_translate(img, angle):
# Randomly form the X translation distance and compute the resulting steering angle change
change = np.random.uniform(-0.5,0.5)
x_translation = (TRANS_X_RANGE * change)
new_angle = angle + (change * TRANS_ANGLE)
# Randomly compute a Y translation
y_translation = (TRANS_Y_RANGE * np.random.uniform(-0.5,0.5))
# Form the translation matrix
translation_matrix = np.float32([[1, 0, x_translation], [0, 1, y_translation]])
# Translate the image
return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])),new_angle
def crop(im):
shape = np.array(im).shape
y1 = int(shape[0]*0.4)
y2 = int(shape[0]*0.87)
# print(y)
im = im[y1:y2 , : , :]
im = cv2.resize(im, (IMG_ROWS, IMG_COLS), interpolation=cv2.INTER_AREA)
return im
def curve_focus(xdata,ydata):
count = 0
for x in range(len(xdata)):
if(ydata[x]==0.000):
count+=1
print("Total = {}\n0 Steering = {}".format(len(xdata),count))
return xdata,ydata
def flip(xdata,ydata):
for x in range(len(xdata)):
flip = np.fliplr(xdata[x])
xdata = np.append(xdata, [flip], axis=0)
ydata = np.append(ydata, (-1*ydata[x]))
return xdata,ydata
def set_model():
model = Sequential()
model.add(Lambda(lambda x: x/127.5 - 1.,
input_shape=SHAPE,
output_shape=SHAPE))
model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))
model.add(Convolution2D(36,5,5,border_mode='same',activation="elu",
name='conv1'))
model.add(Convolution2D(48,3,3,activation="elu",border_mode='same',
name='conv2'))
model.add(Convolution2D(64,3,3,activation="elu",border_mode='same',
name='conv3'))
model.add(Convolution2D(64,3,3,activation="elu",border_mode='same', name='conv4'))
model.add(Flatten(name='flat1'))
# model.add(Dropout(0.2))
# model.add(Dense(1164, activation="elu"))
# model.add(Dropout(.3, name='drop1'))
model.add(Dense(100, activation="elu", name='dense1'))
model.add(Dense(50, activation="elu", name='dense2'))
model.add(Dense(10, activation="elu", name='dense3'))
model.add(Dense(1, activation="linear", name='dense4'))
return model
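# The layers above use the Keras 1 API. A hedged mapping to the Keras 2
# equivalents (assuming a like-for-like port; not from the source):
#   Convolution2D(36, 5, 5, border_mode='same')  ->  Conv2D(36, (5, 5), padding='same')
#   fit_generator(..., samples_per_epoch=N, nb_epoch=E, nb_val_samples=V)
#       ->  steps_per_epoch / epochs / validation_steps (counted in batches, not samples)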
def my_range(start, end, step):
while start <= end:
yield round(start,1)
start += step
def show_data(log):
fig = plt.figure(figsize=(8,2))
a = fig.add_subplot(1,2,1)
im = cv2.imread(FOLDER+log[560,0].strip())
im = cv2.cvtColor(im,cv2.COLOR_BGR2RGB)
a.set_title("Full Resolution")
plt.axis('off')
plt.imshow(im)
im = misc.imresize(im,size=0.2)
a = fig.add_subplot(1,2,2)
a.set_title("After 80% Downsampling")
plt.imshow(im)
# im = crop(im)
# im, an = process_line(log[600])
# a = fig.add_subplot(2,1,2)
# im, an = process_line(log[600])
# plt.imshow(im,aspect="auto",interpolation="nearest")
plt.axis('off')
fig.savefig('examples/Downsampling.png')
plt.show()
	exit()
	# --- everything below this exit() is unreachable debug code, kept as-is;
	# note that X_train in the final block is undefined in this scope ---
	# plt.hist(steer,bins=100)
	# plt.show()
	# exit()
count = 1
y = 0
steer = log[:,3]
for x in my_range(-0.8,0.7,0.1):
while 1:
y = np.random.randint(len(steer))
if(round(steer[y],1)==x):
print("Found {}",(x))
break
# else:
# print("Discarded {}",steer[y])
a=fig.add_subplot(4,5,count)
im = cv2.imread(FOLDER+log[y,0])
im,angle = process_line(log[y])
a.set_title(str(x)+" to "+str(round(angle,1)))
plt.imshow(im,aspect="auto",interpolation="nearest")
count+=1
# print(x)
plt.show()
exit()
pic = np.random.randint(len(X_train))
print(X_train.shape)
plt.imshow(X_train[pic])
plt.show()
exit()
def augment(x,y):
x,y = flip(x,y)
return x,y
def process_line(sample):
img_choice = np.random.randint(3)
angle = 0.0
	# indices assume the usual center/left/right column order of driving_log.csv
	if(img_choice==0):
		angle = float(sample[3])
	elif(img_choice==1):
		angle = float(sample[3])+0.27	# left camera: steer back toward center
	elif(img_choice==2):
		angle = float(sample[3])-0.27	# right camera: steer back toward center
	# (the hard-coded 0.27 correction supersedes the unused OFF_CENTER_IMG constant)
im = cv2.imread(FOLDER+sample[img_choice].strip())
im = misc.imresize(im,size=DOWNSAMPLE_RATIO)
im = crop(im)
im = adjust_brightness(im)
im,angle = img_translate(im,angle)
# im = normalize(im)
return im,angle
def generator(samples, batch_size=32):
"""
Purpose: Yield tensor batches to fit_generator function
Inputs: A file path
Outputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix
Where AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE
"""
num_samples = len(samples)
shuffle(samples)
while 1: # Loop forever so the generator never terminates
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
image,angle = process_line(batch_sample)
images.append(image)
angles.append(angle)
			# stack the batch (cropping already happened inside process_line)
X_train = np.array(images)
y_train = np.array(angles)
X_train, y_train = augment(X_train,y_train)
yield shuffle(X_train, y_train)
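# Hedged usage sketch for the generator (names match this file; the call is
# illustrative): pull one batch and check its shape before training.
#   batch_x, batch_y = next(generator(train_samples, batch_size=32))
#   print(batch_x.shape)  # (64, 300, 300, 3) -- flip() doubles the batch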
if __name__ == "__main__":
log = pd.read_csv(FOLDER+"driving_log.csv").values
	# show_data(log)  # visualization helper; it calls exit(), so leaving it
	# enabled would prevent the training code below from running
print(log.shape)
train_samples, validation_samples = train_test_split(log,test_size=0.2)
im,an = process_line(train_samples[np.random.randint(len(train_samples))])
print(np.array(im).shape)
# plt.imshow(im)
# plt.title(str(an))
# plt.show()
# exit()
model = set_model()
# model.load_weights('weights.h5',by_name=True)
adam = Adam(lr=Learning_Rate)
model.compile(optimizer = adam, loss = 'mean_squared_error')
history=model.fit_generator(generator(train_samples), samples_per_epoch =
SAMPLES_TRAIN, validation_data=generator(validation_samples),
nb_val_samples=SAMPLES_VALIDATION, nb_epoch=EPOCHS, verbose=1)
model.save_weights('weights.h5')
model.save('model.h5')
print("Model saved")
def for_drive(im):
print(im.shape)
x = im
x = misc.imresize(x,size=DOWNSAMPLE_RATIO)
x = crop(x)
# plt.imshow(x)
# plt.show()
# x = color_change(x)
# x = normalize(x)
return x
|
normal
|
{
"blob_id": "b109568c4dba05b16cbed1759a2b9e0a99babc67",
"index": 2982,
"step-1": "<mask token>\n\n\ndef load_data(data):\n temp = []\n for i in range(len(data)):\n im = cv2.imread(data[i])\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n temp.append(im)\n return temp\n\n\ndef normalize(data):\n a = -0.5\n b = 0.5\n greyscale_min = 0\n greyscale_max = 255\n return a + (data - greyscale_min) * (b - a) / (greyscale_max -\n greyscale_min)\n\n\ndef color_change(data):\n x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)\n return x\n\n\n<mask token>\n\n\ndef img_translate(img, angle):\n change = np.random.uniform(-0.5, 0.5)\n x_translation = TRANS_X_RANGE * change\n new_angle = angle + change * TRANS_ANGLE\n y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)\n translation_matrix = np.float32([[1, 0, x_translation], [0, 1,\n y_translation]])\n return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])\n ), new_angle\n\n\n<mask token>\n\n\ndef curve_focus(xdata, ydata):\n count = 0\n for x in range(len(xdata)):\n if ydata[x] == 0.0:\n count += 1\n print('Total = {}\\n0 Steering = {}'.format(len(xdata), count))\n return xdata, ydata\n\n\n<mask token>\n\n\ndef set_model():\n model = Sequential()\n model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,\n output_shape=SHAPE))\n model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',\n name='conv1'))\n model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',\n name='conv2'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv3'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv4'))\n model.add(Flatten(name='flat1'))\n model.add(Dense(100, activation='elu', name='dense1'))\n model.add(Dense(50, activation='elu', name='dense2'))\n model.add(Dense(10, activation='elu', name='dense3'))\n model.add(Dense(1, activation='linear', name='dense4'))\n return model\n\n\n<mask token>\n\n\ndef augment(x, y):\n x, y = flip(x, y)\n return x, y\n\n\n<mask token>\n\n\ndef generator(samples, batch_size=32):\n \"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n num_samples = len(samples)\n shuffle(samples)\n while 1:\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n image, angle = process_line(batch_sample)\n images.append(image)\n angles.append(angle)\n X_train = np.array(images)\n y_train = np.array(angles)\n X_train, y_train = augment(X_train, y_train)\n yield shuffle(X_train, y_train)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_data(data):\n temp = []\n for i in range(len(data)):\n im = cv2.imread(data[i])\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n temp.append(im)\n return temp\n\n\ndef normalize(data):\n a = -0.5\n b = 0.5\n greyscale_min = 0\n greyscale_max = 255\n return a + (data - greyscale_min) * (b - a) / (greyscale_max -\n greyscale_min)\n\n\ndef color_change(data):\n x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)\n return x\n\n\n<mask token>\n\n\ndef img_translate(img, angle):\n change = np.random.uniform(-0.5, 0.5)\n x_translation = TRANS_X_RANGE * change\n new_angle = angle + change * TRANS_ANGLE\n y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)\n translation_matrix = np.float32([[1, 0, x_translation], [0, 1,\n y_translation]])\n return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])\n ), new_angle\n\n\n<mask token>\n\n\ndef curve_focus(xdata, ydata):\n count = 0\n for x in range(len(xdata)):\n if ydata[x] == 0.0:\n count += 1\n print('Total = {}\\n0 Steering = {}'.format(len(xdata), count))\n return xdata, ydata\n\n\n<mask token>\n\n\ndef set_model():\n model = Sequential()\n model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,\n output_shape=SHAPE))\n model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',\n name='conv1'))\n model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',\n name='conv2'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv3'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv4'))\n model.add(Flatten(name='flat1'))\n model.add(Dense(100, activation='elu', name='dense1'))\n model.add(Dense(50, activation='elu', name='dense2'))\n model.add(Dense(10, activation='elu', name='dense3'))\n model.add(Dense(1, activation='linear', name='dense4'))\n return model\n\n\n<mask token>\n\n\ndef show_data(log):\n fig = plt.figure(figsize=(8, 2))\n a = fig.add_subplot(1, 2, 1)\n im = cv2.imread(FOLDER + log[560, 0].strip())\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n a.set_title('Full Resolution')\n plt.axis('off')\n plt.imshow(im)\n im = misc.imresize(im, size=0.2)\n a = fig.add_subplot(1, 2, 2)\n a.set_title('After 80% Downsampling')\n plt.imshow(im)\n plt.axis('off')\n fig.savefig('examples/Downsampling.png')\n plt.show()\n exit()\n count = 1\n y = 0\n steer = log[:, 3]\n for x in my_range(-0.8, 0.7, 0.1):\n while 1:\n y = np.random.randint(len(steer))\n if round(steer[y], 1) == x:\n print('Found {}', x)\n break\n a = fig.add_subplot(4, 5, count)\n im = cv2.imread(FOLDER + log[y, 0])\n im, angle = process_line(log[y])\n a.set_title(str(x) + ' to ' + str(round(angle, 1)))\n plt.imshow(im, aspect='auto', interpolation='nearest')\n count += 1\n plt.show()\n exit()\n pic = np.random.randint(len(X_train))\n print(X_train.shape)\n plt.imshow(X_train[pic])\n plt.show()\n exit()\n\n\ndef augment(x, y):\n x, y = flip(x, y)\n return x, y\n\n\ndef process_line(sample):\n img_choice = np.random.randint(3)\n angle = 0.0\n if img_choice == 0:\n angle = float(sample[3])\n elif img_choice == 1:\n angle = float(sample[3]) + 0.27\n elif img_choice == 2:\n angle = float(sample[3]) - 0.27\n im = cv2.imread(FOLDER + sample[img_choice].strip())\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n im = adjust_brightness(im)\n im, angle = img_translate(im, angle)\n return im, angle\n\n\ndef generator(samples, batch_size=32):\n 
\"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n num_samples = len(samples)\n shuffle(samples)\n while 1:\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n image, angle = process_line(batch_sample)\n images.append(image)\n angles.append(angle)\n X_train = np.array(images)\n y_train = np.array(angles)\n X_train, y_train = augment(X_train, y_train)\n yield shuffle(X_train, y_train)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_data(data):\n temp = []\n for i in range(len(data)):\n im = cv2.imread(data[i])\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n temp.append(im)\n return temp\n\n\ndef normalize(data):\n a = -0.5\n b = 0.5\n greyscale_min = 0\n greyscale_max = 255\n return a + (data - greyscale_min) * (b - a) / (greyscale_max -\n greyscale_min)\n\n\ndef color_change(data):\n x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)\n return x\n\n\n<mask token>\n\n\ndef img_translate(img, angle):\n change = np.random.uniform(-0.5, 0.5)\n x_translation = TRANS_X_RANGE * change\n new_angle = angle + change * TRANS_ANGLE\n y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)\n translation_matrix = np.float32([[1, 0, x_translation], [0, 1,\n y_translation]])\n return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])\n ), new_angle\n\n\n<mask token>\n\n\ndef curve_focus(xdata, ydata):\n count = 0\n for x in range(len(xdata)):\n if ydata[x] == 0.0:\n count += 1\n print('Total = {}\\n0 Steering = {}'.format(len(xdata), count))\n return xdata, ydata\n\n\ndef flip(xdata, ydata):\n for x in range(len(xdata)):\n flip = np.fliplr(xdata[x])\n xdata = np.append(xdata, [flip], axis=0)\n ydata = np.append(ydata, -1 * ydata[x])\n return xdata, ydata\n\n\ndef set_model():\n model = Sequential()\n model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,\n output_shape=SHAPE))\n model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',\n name='conv1'))\n model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',\n name='conv2'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv3'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv4'))\n model.add(Flatten(name='flat1'))\n model.add(Dense(100, activation='elu', name='dense1'))\n model.add(Dense(50, activation='elu', name='dense2'))\n model.add(Dense(10, activation='elu', name='dense3'))\n model.add(Dense(1, activation='linear', name='dense4'))\n return model\n\n\ndef my_range(start, end, step):\n while start <= end:\n yield round(start, 1)\n start += step\n\n\ndef show_data(log):\n fig = plt.figure(figsize=(8, 2))\n a = fig.add_subplot(1, 2, 1)\n im = cv2.imread(FOLDER + log[560, 0].strip())\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n a.set_title('Full Resolution')\n plt.axis('off')\n plt.imshow(im)\n im = misc.imresize(im, size=0.2)\n a = fig.add_subplot(1, 2, 2)\n a.set_title('After 80% Downsampling')\n plt.imshow(im)\n plt.axis('off')\n fig.savefig('examples/Downsampling.png')\n plt.show()\n exit()\n count = 1\n y = 0\n steer = log[:, 3]\n for x in my_range(-0.8, 0.7, 0.1):\n while 1:\n y = np.random.randint(len(steer))\n if round(steer[y], 1) == x:\n print('Found {}', x)\n break\n a = fig.add_subplot(4, 5, count)\n im = cv2.imread(FOLDER + log[y, 0])\n im, angle = process_line(log[y])\n a.set_title(str(x) + ' to ' + str(round(angle, 1)))\n plt.imshow(im, aspect='auto', interpolation='nearest')\n count += 1\n plt.show()\n exit()\n pic = np.random.randint(len(X_train))\n print(X_train.shape)\n plt.imshow(X_train[pic])\n plt.show()\n exit()\n\n\ndef augment(x, y):\n x, y = flip(x, y)\n return x, y\n\n\ndef process_line(sample):\n img_choice = np.random.randint(3)\n angle = 0.0\n if img_choice == 0:\n angle = float(sample[3])\n elif img_choice == 1:\n angle = float(sample[3]) + 0.27\n elif img_choice == 2:\n angle = float(sample[3]) - 
0.27\n im = cv2.imread(FOLDER + sample[img_choice].strip())\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n im = adjust_brightness(im)\n im, angle = img_translate(im, angle)\n return im, angle\n\n\ndef generator(samples, batch_size=32):\n \"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n num_samples = len(samples)\n shuffle(samples)\n while 1:\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n image, angle = process_line(batch_sample)\n images.append(image)\n angles.append(angle)\n X_train = np.array(images)\n y_train = np.array(angles)\n X_train, y_train = augment(X_train, y_train)\n yield shuffle(X_train, y_train)\n\n\n<mask token>\n",
"step-4": "<mask token>\nmatplotlib.use('TkAgg')\n<mask token>\n\n\ndef load_data(data):\n temp = []\n for i in range(len(data)):\n im = cv2.imread(data[i])\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n temp.append(im)\n return temp\n\n\ndef normalize(data):\n a = -0.5\n b = 0.5\n greyscale_min = 0\n greyscale_max = 255\n return a + (data - greyscale_min) * (b - a) / (greyscale_max -\n greyscale_min)\n\n\ndef color_change(data):\n x = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)\n return x\n\n\ndef adjust_brightness(im):\n temp = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n brightness = BRIGHTNESS_RANGE * np.random.uniform(-1, 1)\n temp[:, :, 2] = temp[:, :, 2] * (1 - brightness)\n return cv2.cvtColor(temp, cv2.COLOR_HSV2RGB)\n\n\ndef img_translate(img, angle):\n change = np.random.uniform(-0.5, 0.5)\n x_translation = TRANS_X_RANGE * change\n new_angle = angle + change * TRANS_ANGLE\n y_translation = TRANS_Y_RANGE * np.random.uniform(-0.5, 0.5)\n translation_matrix = np.float32([[1, 0, x_translation], [0, 1,\n y_translation]])\n return cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])\n ), new_angle\n\n\ndef crop(im):\n shape = np.array(im).shape\n y1 = int(shape[0] * 0.4)\n y2 = int(shape[0] * 0.87)\n im = im[y1:y2, :, :]\n im = cv2.resize(im, (IMG_ROWS, IMG_COLS), interpolation=cv2.INTER_AREA)\n return im\n\n\ndef curve_focus(xdata, ydata):\n count = 0\n for x in range(len(xdata)):\n if ydata[x] == 0.0:\n count += 1\n print('Total = {}\\n0 Steering = {}'.format(len(xdata), count))\n return xdata, ydata\n\n\ndef flip(xdata, ydata):\n for x in range(len(xdata)):\n flip = np.fliplr(xdata[x])\n xdata = np.append(xdata, [flip], axis=0)\n ydata = np.append(ydata, -1 * ydata[x])\n return xdata, ydata\n\n\ndef set_model():\n model = Sequential()\n model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=SHAPE,\n output_shape=SHAPE))\n model.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n model.add(Convolution2D(36, 5, 5, border_mode='same', activation='elu',\n name='conv1'))\n model.add(Convolution2D(48, 3, 3, activation='elu', border_mode='same',\n name='conv2'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv3'))\n model.add(Convolution2D(64, 3, 3, activation='elu', border_mode='same',\n name='conv4'))\n model.add(Flatten(name='flat1'))\n model.add(Dense(100, activation='elu', name='dense1'))\n model.add(Dense(50, activation='elu', name='dense2'))\n model.add(Dense(10, activation='elu', name='dense3'))\n model.add(Dense(1, activation='linear', name='dense4'))\n return model\n\n\ndef my_range(start, end, step):\n while start <= end:\n yield round(start, 1)\n start += step\n\n\ndef show_data(log):\n fig = plt.figure(figsize=(8, 2))\n a = fig.add_subplot(1, 2, 1)\n im = cv2.imread(FOLDER + log[560, 0].strip())\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n a.set_title('Full Resolution')\n plt.axis('off')\n plt.imshow(im)\n im = misc.imresize(im, size=0.2)\n a = fig.add_subplot(1, 2, 2)\n a.set_title('After 80% Downsampling')\n plt.imshow(im)\n plt.axis('off')\n fig.savefig('examples/Downsampling.png')\n plt.show()\n exit()\n count = 1\n y = 0\n steer = log[:, 3]\n for x in my_range(-0.8, 0.7, 0.1):\n while 1:\n y = np.random.randint(len(steer))\n if round(steer[y], 1) == x:\n print('Found {}', x)\n break\n a = fig.add_subplot(4, 5, count)\n im = cv2.imread(FOLDER + log[y, 0])\n im, angle = process_line(log[y])\n a.set_title(str(x) + ' to ' + str(round(angle, 1)))\n plt.imshow(im, aspect='auto', 
interpolation='nearest')\n count += 1\n plt.show()\n exit()\n pic = np.random.randint(len(X_train))\n print(X_train.shape)\n plt.imshow(X_train[pic])\n plt.show()\n exit()\n\n\ndef augment(x, y):\n x, y = flip(x, y)\n return x, y\n\n\ndef process_line(sample):\n img_choice = np.random.randint(3)\n angle = 0.0\n if img_choice == 0:\n angle = float(sample[3])\n elif img_choice == 1:\n angle = float(sample[3]) + 0.27\n elif img_choice == 2:\n angle = float(sample[3]) - 0.27\n im = cv2.imread(FOLDER + sample[img_choice].strip())\n im = misc.imresize(im, size=DOWNSAMPLE_RATIO)\n im = crop(im)\n im = adjust_brightness(im)\n im, angle = img_translate(im, angle)\n return im, angle\n\n\ndef generator(samples, batch_size=32):\n \"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n num_samples = len(samples)\n shuffle(samples)\n while 1:\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n image, angle = process_line(batch_sample)\n images.append(image)\n angles.append(angle)\n X_train = np.array(images)\n y_train = np.array(angles)\n X_train, y_train = augment(X_train, y_train)\n yield shuffle(X_train, y_train)\n\n\nif __name__ == '__main__':\n log = pd.read_csv(FOLDER + 'driving_log.csv').values\n show_data(log)\n print(log.shape)\n train_samples, validation_samples = train_test_split(log, test_size=0.2)\n im, an = process_line(train_samples[np.random.randint(len(train_samples))])\n print(np.array(im).shape)\n model = set_model()\n adam = Adam(lr=Learning_Rate)\n model.compile(optimizer=adam, loss='mean_squared_error')\n history = model.fit_generator(generator(train_samples),\n samples_per_epoch=SAMPLES_TRAIN, validation_data=generator(\n validation_samples), nb_val_samples=SAMPLES_VALIDATION, nb_epoch=\n EPOCHS, verbose=1)\n model.save_weights('weights.h5')\n model.save('model.h5')\n print('Model saved')\n\n\ndef for_drive(im):\n print(im.shape)\n x = im\n x = misc.imresize(x, size=DOWNSAMPLE_RATIO)\n x = crop(x)\n return x\n",
"step-5": "import pandas as pd\nimport numpy as np\nfrom scipy import misc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nimport time\nimport math\nimport cv2\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib import pyplot as plt\n\nfrom keras.models import Sequential\nfrom keras.layers import Lambda\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras.models import load_model\n\n# Data augmentation constants\nTRANS_X_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)\nTRANS_Y_RANGE = 10 # Number of translation pixels for augmented data (-RANGE/2, RANGE/2)\nTRANS_ANGLE = .3 # Maximum angle change when translating in the X direction\nOFF_CENTER_IMG = .25 # Angle change when using off center images\n\nDOWNSAMPLE_RATIO = 0.99\nLearning_Rate = 0.0001\nFOLDER = \"examples/\"\nEPOCHS = 4\nTRAINABLE = True\nBRIGHTNESS_RANGE = 0.15\nIMG_ROWS = 300\nIMG_COLS = 300\nSHAPE = (IMG_ROWS,IMG_COLS,3)\n\nSAMPLES_TRAIN = 5000\nSAMPLES_VALIDATION = 1000\n\n\ndef load_data(data):\n\ttemp = []\n\tfor i in range(len(data)):\n\t\tim = cv2.imread(data[i])\n\t\tim = misc.imresize(im,size=DOWNSAMPLE_RATIO)\n\t\tim = crop(im)\n\t\t# im = color_change(im)\n\t\ttemp.append(im)\n\treturn temp\n\ndef normalize(data):\n\ta=-0.5\n\tb=0.5\n\tgreyscale_min=0\n\tgreyscale_max=255\n\treturn a + ( ( (data - greyscale_min)*(b - a) )/(greyscale_max - greyscale_min))\n\ndef color_change(data):\n\tx = cv2.cvtColor(data,cv2.COLOR_BGR2HSV)\n\treturn x\n\ndef adjust_brightness(im):\n\ttemp = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n\t# Compute a random brightness value and apply to the image\n\tbrightness = BRIGHTNESS_RANGE * np.random.uniform(-1,1)\n\ttemp[:, :, 2] = temp[:, :, 2] * (1-brightness)\n\t# Convert back to RGB and return\n\treturn cv2.cvtColor(temp, cv2.COLOR_HSV2RGB)\n\ndef img_translate(img, angle):\n\n\t# Randomly form the X translation distance and compute the resulting steering angle change\n\tchange = np.random.uniform(-0.5,0.5)\n\tx_translation = (TRANS_X_RANGE * change)\n\tnew_angle = angle + (change * TRANS_ANGLE)\n\n\t# Randomly compute a Y translation\n\ty_translation = (TRANS_Y_RANGE * np.random.uniform(-0.5,0.5))\n\n\t# Form the translation matrix\n\ttranslation_matrix = np.float32([[1, 0, x_translation], [0, 1, y_translation]])\n\n\t# Translate the image\n\treturn cv2.warpAffine(img, translation_matrix, (img.shape[1], img.shape[0])),new_angle\n\n\ndef crop(im):\n\tshape = np.array(im).shape\n\ty1 = int(shape[0]*0.4)\n\ty2 = int(shape[0]*0.87)\n\t# print(y)\n\tim = im[y1:y2 , : , :]\n\tim = cv2.resize(im, (IMG_ROWS, IMG_COLS), interpolation=cv2.INTER_AREA)\n\treturn im\n\ndef curve_focus(xdata,ydata):\n\tcount = 0\n\tfor x in range(len(xdata)):\n\t\tif(ydata[x]==0.000):\n\t\t\tcount+=1\n\tprint(\"Total = {}\\n0 Steering = {}\".format(len(xdata),count))\n\treturn xdata,ydata\n\ndef flip(xdata,ydata):\n\tfor x in range(len(xdata)):\n\t\tflip = np.fliplr(xdata[x])\n\t\txdata = np.append(xdata, [flip], axis=0)\n\t\tydata = np.append(ydata, (-1*ydata[x]))\n\treturn xdata,ydata\n\ndef set_model():\n\tmodel = Sequential()\n\tmodel.add(Lambda(lambda x: x/127.5 - 1.,\n\t\tinput_shape=SHAPE,\n\t\toutput_shape=SHAPE))\n\tmodel.add(Convolution2D(3, 1, 1, border_mode='same', name='color_conv'))\n\tmodel.add(Convolution2D(36,5,5,border_mode='same',activation=\"elu\",\n\t 
name='conv1'))\n\tmodel.add(Convolution2D(48,3,3,activation=\"elu\",border_mode='same',\n\t name='conv2'))\n\tmodel.add(Convolution2D(64,3,3,activation=\"elu\",border_mode='same', \n\t\tname='conv3'))\n\tmodel.add(Convolution2D(64,3,3,activation=\"elu\",border_mode='same', name='conv4'))\n\tmodel.add(Flatten(name='flat1'))\n\t# model.add(Dropout(0.2))\n\t# model.add(Dense(1164, activation=\"elu\"))\n\t# model.add(Dropout(.3, name='drop1'))\n\tmodel.add(Dense(100, activation=\"elu\", name='dense1'))\n\tmodel.add(Dense(50, activation=\"elu\", name='dense2'))\n\tmodel.add(Dense(10, activation=\"elu\", name='dense3'))\n\tmodel.add(Dense(1, activation=\"linear\", name='dense4'))\n\treturn model\n\ndef my_range(start, end, step):\n\twhile start <= end:\n\t\tyield round(start,1)\n\t\tstart += step\n\ndef show_data(log):\n\tfig = plt.figure(figsize=(8,2))\n\ta = fig.add_subplot(1,2,1)\n\tim = cv2.imread(FOLDER+log[560,0].strip())\n\tim = cv2.cvtColor(im,cv2.COLOR_BGR2RGB)\n\ta.set_title(\"Full Resolution\")\n\tplt.axis('off')\n\tplt.imshow(im)\n\tim = misc.imresize(im,size=0.2)\n\ta = fig.add_subplot(1,2,2)\n\ta.set_title(\"After 80% Downsampling\")\n\tplt.imshow(im)\n\t# im = crop(im)\n\t# im, an = process_line(log[600])\n\t# a = fig.add_subplot(2,1,2)\n\t# im, an = process_line(log[600])\n\t# plt.imshow(im,aspect=\"auto\",interpolation=\"nearest\")\n\tplt.axis('off')\n\tfig.savefig('examples/Downsampling.png')\n\tplt.show()\n\texit()\n\t# plt.hist(steer,bins=100)\n\t# plt.show()\n\t# exit()\n\tcount = 1\n\ty = 0\n\tsteer = log[:,3]\n\tfor x in my_range(-0.8,0.7,0.1):\n\t\twhile 1:\n\t\t\ty = np.random.randint(len(steer))\n\t\t\tif(round(steer[y],1)==x):\n\t\t\t\tprint(\"Found {}\",(x))\n\t\t\t\tbreak\n\t\t\t# else:\n\t\t\t# \tprint(\"Discarded {}\",steer[y])\n\t\ta=fig.add_subplot(4,5,count)\n\t\tim = cv2.imread(FOLDER+log[y,0])\n\t\tim,angle = process_line(log[y])\n\t\ta.set_title(str(x)+\" to \"+str(round(angle,1)))\n\t\tplt.imshow(im,aspect=\"auto\",interpolation=\"nearest\")\n\t\tcount+=1\n\t\t# print(x)\n\tplt.show()\n\n\texit()\n\tpic = np.random.randint(len(X_train))\n\tprint(X_train.shape)\n\tplt.imshow(X_train[pic])\n\tplt.show()\n\texit()\n\ndef augment(x,y):\n\tx,y = flip(x,y)\n\treturn x,y\n\ndef process_line(sample):\n\n\timg_choice = np.random.randint(3)\t\n\n\tangle = 0.0\n\tif(img_choice==0):\n\t\tangle = float(sample[3])\n\telif(img_choice==1):\n\t\tangle = float(sample[3])+0.27\n\telif(img_choice==2):\n\t\tangle = float(sample[3])-0.27\n\n\tim = cv2.imread(FOLDER+sample[img_choice].strip())\n\tim = misc.imresize(im,size=DOWNSAMPLE_RATIO)\n\tim = crop(im)\n\tim = adjust_brightness(im)\n\tim,angle = img_translate(im,angle)\n\t# im = normalize(im)\n\n\treturn im,angle\n\ndef generator(samples, batch_size=32):\n\t\"\"\"\n\tPurpose: Yield tensor batches to fit_generator function\n\tInputs: A file path\n\tOutputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix\n\tWhere AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE\n\t\"\"\"\n\tnum_samples = len(samples)\n\tshuffle(samples)\n\twhile 1: # Loop forever so the generator never terminates\n\t\tfor offset in range(0, num_samples, batch_size):\n\t\t\tbatch_samples = samples[offset:offset+batch_size]\n\n\t\t\timages = []\n\t\t\tangles = []\n\t\t\tfor batch_sample in batch_samples:\n\t\t\t\timage,angle = process_line(batch_sample)\n\t\t\t\timages.append(image)\n\t\t\t\tangles.append(angle)\n\n\t\t\t# trim image to only see section with road\n\t\t\tX_train = np.array(images)\n\t\t\ty_train = 
np.array(angles)\n\t\t\tX_train, y_train = augment(X_train,y_train)\n\t\t\tyield shuffle(X_train, y_train)\n\n\nif __name__ == \"__main__\":\n\tlog = pd.read_csv(FOLDER+\"driving_log.csv\").values\n\tshow_data(log)\n\t\n\n\tprint(log.shape)\n\ttrain_samples, validation_samples = train_test_split(log,test_size=0.2)\n\t\n\tim,an = process_line(train_samples[np.random.randint(len(train_samples))])\n\tprint(np.array(im).shape)\n\t# plt.imshow(im)\n\t# plt.title(str(an))\n\t# plt.show()\n\t# exit()\n\tmodel = set_model()\n\t# model.load_weights('weights.h5',by_name=True) \n\n\tadam = Adam(lr=Learning_Rate)\n\tmodel.compile(optimizer = adam, loss = 'mean_squared_error')\n\thistory=model.fit_generator(generator(train_samples), samples_per_epoch = \n\t\t\tSAMPLES_TRAIN, validation_data=generator(validation_samples), \n\t\t\tnb_val_samples=SAMPLES_VALIDATION, nb_epoch=EPOCHS, verbose=1)\n\tmodel.save_weights('weights.h5')\n\tmodel.save('model.h5')\n\t\n\tprint(\"Model saved\")\n\ndef for_drive(im):\n\tprint(im.shape)\n\tx = im\n\tx = misc.imresize(x,size=DOWNSAMPLE_RATIO)\n\tx = crop(x)\n\t# plt.imshow(x)\n\t# plt.show()\n\t# x = color_change(x)\n\t# x = normalize(x)\n\treturn x\n\n",
"step-ids": [
8,
10,
12,
16,
19
]
}
|
[
8,
10,
12,
16,
19
] |
# Generated by Django 3.0.5 on 2020-05-12 13:26
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='idcard',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, null=True)),
('employment_id', models.CharField(max_length=20, null=True)),
('customer_account_no', models.CharField(max_length=20, null=True)),
('circle', models.CharField(max_length=20, null=True)),
('company_name', models.CharField(max_length=20, null=True)),
('department', models.CharField(max_length=20, null=True)),
('certificate_no', models.CharField(max_length=20)),
('date', models.CharField(max_length=20, null=True)),
],
),
]
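# A hedged usage note (not part of the generated file): once this migration
# sits in an app's migrations/ package, apply it with `python manage.py migrate`.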
|
normal
|
{
"blob_id": "422873f89468b1faabed96f72f463b6294b85276",
"index": 5314,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='idcard', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=20,\n null=True)), ('employment_id', models.CharField(max_length=20, null\n =True)), ('customer_account_no', models.CharField(max_length=20,\n null=True)), ('circle', models.CharField(max_length=20, null=True)),\n ('company_name', models.CharField(max_length=20, null=True)), (\n 'department', models.CharField(max_length=20, null=True)), (\n 'certificate_no', models.CharField(max_length=20)), ('date', models\n .CharField(max_length=20, null=True))])]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='idcard', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=20,\n null=True)), ('employment_id', models.CharField(max_length=20, null\n =True)), ('customer_account_no', models.CharField(max_length=20,\n null=True)), ('circle', models.CharField(max_length=20, null=True)),\n ('company_name', models.CharField(max_length=20, null=True)), (\n 'department', models.CharField(max_length=20, null=True)), (\n 'certificate_no', models.CharField(max_length=20)), ('date', models\n .CharField(max_length=20, null=True))])]\n",
"step-5": "# Generated by Django 3.0.5 on 2020-05-12 13:26\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='idcard',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20, null=True)),\n ('employment_id', models.CharField(max_length=20, null=True)),\n ('customer_account_no', models.CharField(max_length=20, null=True)),\n ('circle', models.CharField(max_length=20, null=True)),\n ('company_name', models.CharField(max_length=20, null=True)),\n ('department', models.CharField(max_length=20, null=True)),\n ('certificate_no', models.CharField(max_length=20)),\n ('date', models.CharField(max_length=20, null=True)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from base_plugin import *
from plugin_utils import *
from datetime import datetime
import time
class LogPlugin(Plugin):
def initialize(self):
self.add_trigger(on_message)
self.add_command("!chatsearch", self.search)
self.add_command("!chatreplay", self.replay)
def run(self, message):
append_to_file(str(datetime.now()) + " : " + message.From + " : " + message.Body + '\n', "chatlog.log")
def search(self, message, query, *additional_queries):
chat_history = read_lines_from_file("chatlog.log")
chat_history.reverse()
found_line = None
for line in chat_history:
if query in line:
found_line = line
for additional_query in additional_queries:
if additional_query not in line:
found_line = None
break
if found_line:
break
		if found_line:
			self.send_message(message.From, found_line)
		return
def replay(self, message, startTime, endTime = None):
start_time = None
end_time = None
try:
start_time = datetime.strptime(startTime, "%Y-%m-%d,%H:%M")
if endTime:
end_time = datetime.strptime(endTime, "%Y-%m-%d,%H:%M")
except Exception as e:
self.send_message(message.From, "Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; " + str(e))
return
chat_history = read_lines_from_file("chatlog.log")
for line in chat_history:
line_tokens = line.split(" : ")
line_time = None
try:
line_time = datetime.strptime(line_tokens[0], "%Y-%m-%d %H:%M:%S.%f")
except:
continue
			# Python 2.6 compatibility: timedelta.total_seconds() only exists on
			# 2.7+, so compute the equivalent number of seconds manually.
			delta = (line_time - start_time)
			delta_seconds = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6
if ((line_time > start_time ) \
and ( end_time and line_time < end_time )) \
or (not end_time and abs(delta_seconds) < 10):
self.send_message(message.From, line)
time.sleep(1)
self.send_message(message.From, "Done replay.")
|
normal
|
{
"blob_id": "d932ab84848c9a8ca8bb23a57424b8f6190b6260",
"index": 2563,
"step-1": "<mask token>\n\n\nclass LogPlugin(Plugin):\n <mask token>\n <mask token>\n\n def search(self, message, query, *additional_queries):\n chat_history = read_lines_from_file('chatlog.log')\n chat_history.reverse()\n found_line = None\n for line in chat_history:\n if query in line:\n found_line = line\n for additional_query in additional_queries:\n if additional_query not in line:\n found_line = None\n break\n if found_line:\n break\n if found_line:\n self.send_message(message.From, line)\n return\n\n def replay(self, message, startTime, endTime=None):\n start_time = None\n end_time = None\n try:\n start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')\n if endTime:\n end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')\n except Exception as e:\n self.send_message(message.From, \n 'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; '\n + str(e))\n return\n chat_history = read_lines_from_file('chatlog.log')\n for line in chat_history:\n line_tokens = line.split(' : ')\n line_time = None\n try:\n line_time = datetime.strptime(line_tokens[0],\n '%Y-%m-%d %H:%M:%S.%f')\n except:\n continue\n delta = line_time - start_time\n delta_seconds = (delta.microseconds + (delta.seconds + delta.\n days * 24 * 3600) * 10 ** 6) / 10 ** 6\n if line_time > start_time and (end_time and line_time < end_time\n ) or not end_time and abs(delta_seconds) < 10:\n self.send_message(message.From, line)\n time.sleep(1)\n self.send_message(message.From, 'Done replay.')\n",
"step-2": "<mask token>\n\n\nclass LogPlugin(Plugin):\n\n def initialize(self):\n self.add_trigger(on_message)\n self.add_command('!chatsearch', self.search)\n self.add_command('!chatreplay', self.replay)\n <mask token>\n\n def search(self, message, query, *additional_queries):\n chat_history = read_lines_from_file('chatlog.log')\n chat_history.reverse()\n found_line = None\n for line in chat_history:\n if query in line:\n found_line = line\n for additional_query in additional_queries:\n if additional_query not in line:\n found_line = None\n break\n if found_line:\n break\n if found_line:\n self.send_message(message.From, line)\n return\n\n def replay(self, message, startTime, endTime=None):\n start_time = None\n end_time = None\n try:\n start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')\n if endTime:\n end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')\n except Exception as e:\n self.send_message(message.From, \n 'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; '\n + str(e))\n return\n chat_history = read_lines_from_file('chatlog.log')\n for line in chat_history:\n line_tokens = line.split(' : ')\n line_time = None\n try:\n line_time = datetime.strptime(line_tokens[0],\n '%Y-%m-%d %H:%M:%S.%f')\n except:\n continue\n delta = line_time - start_time\n delta_seconds = (delta.microseconds + (delta.seconds + delta.\n days * 24 * 3600) * 10 ** 6) / 10 ** 6\n if line_time > start_time and (end_time and line_time < end_time\n ) or not end_time and abs(delta_seconds) < 10:\n self.send_message(message.From, line)\n time.sleep(1)\n self.send_message(message.From, 'Done replay.')\n",
"step-3": "<mask token>\n\n\nclass LogPlugin(Plugin):\n\n def initialize(self):\n self.add_trigger(on_message)\n self.add_command('!chatsearch', self.search)\n self.add_command('!chatreplay', self.replay)\n\n def run(self, message):\n append_to_file(str(datetime.now()) + ' : ' + message.From + ' : ' +\n message.Body + '\\n', 'chatlog.log')\n\n def search(self, message, query, *additional_queries):\n chat_history = read_lines_from_file('chatlog.log')\n chat_history.reverse()\n found_line = None\n for line in chat_history:\n if query in line:\n found_line = line\n for additional_query in additional_queries:\n if additional_query not in line:\n found_line = None\n break\n if found_line:\n break\n if found_line:\n self.send_message(message.From, line)\n return\n\n def replay(self, message, startTime, endTime=None):\n start_time = None\n end_time = None\n try:\n start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')\n if endTime:\n end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')\n except Exception as e:\n self.send_message(message.From, \n 'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; '\n + str(e))\n return\n chat_history = read_lines_from_file('chatlog.log')\n for line in chat_history:\n line_tokens = line.split(' : ')\n line_time = None\n try:\n line_time = datetime.strptime(line_tokens[0],\n '%Y-%m-%d %H:%M:%S.%f')\n except:\n continue\n delta = line_time - start_time\n delta_seconds = (delta.microseconds + (delta.seconds + delta.\n days * 24 * 3600) * 10 ** 6) / 10 ** 6\n if line_time > start_time and (end_time and line_time < end_time\n ) or not end_time and abs(delta_seconds) < 10:\n self.send_message(message.From, line)\n time.sleep(1)\n self.send_message(message.From, 'Done replay.')\n",
"step-4": "from base_plugin import *\nfrom plugin_utils import *\nfrom datetime import datetime\nimport time\n\n\nclass LogPlugin(Plugin):\n\n def initialize(self):\n self.add_trigger(on_message)\n self.add_command('!chatsearch', self.search)\n self.add_command('!chatreplay', self.replay)\n\n def run(self, message):\n append_to_file(str(datetime.now()) + ' : ' + message.From + ' : ' +\n message.Body + '\\n', 'chatlog.log')\n\n def search(self, message, query, *additional_queries):\n chat_history = read_lines_from_file('chatlog.log')\n chat_history.reverse()\n found_line = None\n for line in chat_history:\n if query in line:\n found_line = line\n for additional_query in additional_queries:\n if additional_query not in line:\n found_line = None\n break\n if found_line:\n break\n if found_line:\n self.send_message(message.From, line)\n return\n\n def replay(self, message, startTime, endTime=None):\n start_time = None\n end_time = None\n try:\n start_time = datetime.strptime(startTime, '%Y-%m-%d,%H:%M')\n if endTime:\n end_time = datetime.strptime(endTime, '%Y-%m-%d,%H:%M')\n except Exception as e:\n self.send_message(message.From, \n 'Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; '\n + str(e))\n return\n chat_history = read_lines_from_file('chatlog.log')\n for line in chat_history:\n line_tokens = line.split(' : ')\n line_time = None\n try:\n line_time = datetime.strptime(line_tokens[0],\n '%Y-%m-%d %H:%M:%S.%f')\n except:\n continue\n delta = line_time - start_time\n delta_seconds = (delta.microseconds + (delta.seconds + delta.\n days * 24 * 3600) * 10 ** 6) / 10 ** 6\n if line_time > start_time and (end_time and line_time < end_time\n ) or not end_time and abs(delta_seconds) < 10:\n self.send_message(message.From, line)\n time.sleep(1)\n self.send_message(message.From, 'Done replay.')\n",
"step-5": "from base_plugin import *\nfrom plugin_utils import *\n\nfrom datetime import datetime\nimport time\n\n\nclass LogPlugin(Plugin):\n\tdef initialize(self):\n\t\tself.add_trigger(on_message)\n\n\t\tself.add_command(\"!chatsearch\", self.search)\n\t\tself.add_command(\"!chatreplay\", self.replay)\n\n\n\tdef run(self, message):\n\t\tappend_to_file(str(datetime.now()) + \" : \" + message.From + \" : \" + message.Body + '\\n', \"chatlog.log\")\n\n\n\tdef search(self, message, query, *additional_queries):\n\t\tchat_history = read_lines_from_file(\"chatlog.log\")\n\t\tchat_history.reverse()\n\n\t\tfound_line = None\n\t\tfor line in chat_history:\n\t\t\tif query in line:\n\t\t\t\tfound_line = line\n\t\t\t\tfor additional_query in additional_queries:\n\t\t\t\t\tif additional_query not in line:\n\t\t\t\t\t\tfound_line = None\n\t\t\t\t\t\tbreak\n\n\t\t\t\tif found_line:\n\t\t\t\t\tbreak\n\n\t\tif found_line:\n\t\t\tself.send_message(message.From, line)\n\n\t\treturn\n\n\tdef replay(self, message, startTime, endTime = None):\n\t\tstart_time = None\n\t\tend_time = None\n\t\ttry:\n\t\t\tstart_time = datetime.strptime(startTime, \"%Y-%m-%d,%H:%M\")\n\t\t\tif endTime:\n\t\t\t\tend_time = datetime.strptime(endTime, \"%Y-%m-%d,%H:%M\")\n\t\texcept Exception as e:\n\t\t\tself.send_message(message.From, \"Expects inputs in the format: !chatreplay <yyyy-mm-dd,hh:mm> [<yyyyy-mm-dd,hh:mm>] ; \" + str(e))\n\t\t\treturn\n\n\n\t\tchat_history = read_lines_from_file(\"chatlog.log\")\n\n\t\tfor line in chat_history:\n\t\t\tline_tokens = line.split(\" : \")\n\n\t\t\tline_time = None\n\t\t\ttry:\n\t\t\t\tline_time = datetime.strptime(line_tokens[0], \"%Y-%m-%d %H:%M:%S.%f\")\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t#2.6 compatibility.\n\t\t\tdelta = (line_time - start_time)\n\t\t\tdelta_seconds = (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6\n\n\t\t\tif ((line_time > start_time ) \\\n\t\t\t\t\tand ( end_time and line_time < end_time )) \\\n\t\t\t\tor (not end_time and abs(delta_seconds) < 10):\n\t\t\t\t\tself.send_message(message.From, line)\n\t\t\t\t\ttime.sleep(1)\n\n\t\tself.send_message(message.From, \"Done replay.\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.db import models
# Create your models here.
class GameGenre(models.Model):
genreName = models.CharField(max_length=100)
genreDescription = models.CharField(max_length=300)
def __str__(self):
return "%s" % (self.genreName)
class Game(models.Model):
    gameName = models.CharField(max_length=100)
    # Django < 2.0 style: newer versions require an explicit on_delete, e.g.
    # models.ForeignKey(GameGenre, on_delete=models.CASCADE)
    genre = models.ForeignKey(GameGenre)
    def __str__(self):
        return "%s, %s" % (self.gameName, self.genre)
class Players(models.Model):
playerName = models.CharField(max_length=100)
games = models.ManyToManyField(Game)
def __str__(self):
return "%s" % (self.playerName)
|
normal
|
{
"blob_id": "092242cdb231e09ccf3dd4dccfb6d786c3e4aad2",
"index": 8036,
"step-1": "<mask token>\n\n\nclass Game(models.Model):\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return '%s, %s' % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return '%s' % self.playerName\n",
"step-2": "<mask token>\n\n\nclass GameGenre(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return '%s' % self.genreName\n\n\nclass Game(models.Model):\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return '%s, %s' % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return '%s' % self.playerName\n",
"step-3": "<mask token>\n\n\nclass GameGenre(models.Model):\n genreName = models.CharField(max_length=100)\n genreDescription = models.CharField(max_length=300)\n\n def __str__(self):\n return '%s' % self.genreName\n\n\nclass Game(models.Model):\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return '%s, %s' % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return '%s' % self.playerName\n",
"step-4": "from django.db import models\n\n\nclass GameGenre(models.Model):\n genreName = models.CharField(max_length=100)\n genreDescription = models.CharField(max_length=300)\n\n def __str__(self):\n return '%s' % self.genreName\n\n\nclass Game(models.Model):\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return '%s, %s' % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return '%s' % self.playerName\n",
"step-5": "from django.db import models\n\n# Create your models here.\n\n\nclass GameGenre(models.Model):\n\n genreName = models.CharField(max_length=100)\n genreDescription = models.CharField(max_length=300)\n\n def __str__(self):\n return \"%s\" % (self.genreName)\n\n\nclass Game(models.Model):\n\n gameName = models.CharField(max_length=100)\n genre = models.ForeignKey(GameGenre)\n\n def __str__(self):\n return \"%s, %s\" % (self.gameName, self.genre)\n\n\nclass Players(models.Model):\n\n playerName = models.CharField(max_length=100)\n games = models.ManyToManyField(Game)\n\n def __str__(self):\n return \"%s\" % (self.playerName)\n",
"step-ids": [
6,
8,
9,
10,
11
]
}
|
[
6,
8,
9,
10,
11
] |
import os
import datetime
import traceback
import json
import requests
import logging
from model import Product
from naver_api import naver_client_id, naver_client_secret
DEBUG = False
if not DEBUG:
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('botocore').setLevel(logging.WARNING)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
# print(naver_client_id)
# print(naver_client_secret)
products = list(Product.scan(Product.do_crawl==True))
for product in products:
product.search_lowest_price()
print('{} product(s) crawled'.format(len(products)))
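# Hedged local-run sketch (assumes AWS credentials and the Product table exist;
# the empty event/context mirror what a scheduled trigger would send):
if __name__ == '__main__':
    lambda_handler({}, None)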
|
normal
|
{
"blob_id": "76905171602cbeb53903a4b0259685288da3a083",
"index": 6365,
"step-1": "<mask token>\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-2": "<mask token>\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\n<mask token>\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-3": "<mask token>\nDEBUG = False\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-4": "import os\nimport datetime\nimport traceback\nimport json\nimport requests\nimport logging\nfrom model import Product\nfrom naver_api import naver_client_id, naver_client_secret\nDEBUG = False\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n products = list(Product.scan(Product.do_crawl == True))\n for product in products:\n product.search_lowest_price()\n print('{} product(s) crawled'.format(len(products)))\n",
"step-5": "import os\nimport datetime\nimport traceback\nimport json\nimport requests\nimport logging\n\nfrom model import Product\nfrom naver_api import naver_client_id, naver_client_secret\n\n\nDEBUG = False\nif not DEBUG:\n logging.getLogger('boto3').setLevel(logging.WARNING)\n logging.getLogger('botocore').setLevel(logging.WARNING)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n # print(naver_client_id)\n # print(naver_client_secret)\n\n products = list(Product.scan(Product.do_crawl==True))\n\n for product in products:\n product.search_lowest_price()\n\n print('{} product(s) crawled'.format(len(products)))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import tkinter as tk
from tkinter import messagebox
from os.path import join
from pynput import keyboard
from src.save_count import SaveCount
class MainApplication(tk.Frame):
def __init__(self, root: tk.Tk):
super().__init__(root)
self.root = root
self.pack(padx=32, pady=32, expand=True)
self.root.option_add("*tearOff", False)
self.root.title("Counter")
frm_buttons = tk.Frame(self)
frm_buttons.grid(row=0, column=0)
self.var_count = tk.IntVar(frm_buttons)
tk.Button(frm_buttons, textvariable=self.var_count, command=self.count_up, font="Times, 60") \
.grid(row=0, column=0, columnspan=3, sticky=tk.NSEW)
tk.Button(frm_buttons, text="-1", command=self.count_down).grid(row=1, column=0)
tk.Button(frm_buttons, text="Reset", command=self.reset).grid(row=1, column=1)
tk.Button(frm_buttons, text="Save", command=self.save).grid(row=1, column=2)
# tk.Button(frm_buttons, text="Undecorated Window", command=None).grid(row=2, column=0, columnspan=3)
self.selected_count = ""
self.lst_counts = tk.Listbox(self)
self.lst_counts.grid(row=0, column=1)
self.lst_counts.bind("<<ListboxSelect>>", self.listbox_select)
self.lst_counts.bind("<Button-3>", self.right_click)
self.men_list = tk.Menu(self)
self.men_list.add_command(label="Delete Selected", command=self.delete_save)
self.root.bind("<Key>", self.key_press)
try:
saves = os.listdir("data")
except FileNotFoundError:
os.mkdir("data")
messagebox.showerror("Save Error", "No data folder was found; created one now.")
return
for count_save in saves:
self.lst_counts.insert(tk.END, count_save)
listener = keyboard.Listener(on_release=self.on_key_release)
listener.start()
def count_up(self):
        # First press with no saved configurations yet: prompt for a name first
        if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:
            SaveCount(tk.Toplevel(), self.save_to_listbox)
            return
else:
if not self.selected_count:
messagebox.showerror("No Configuration Selected", "Please select a configuration.")
return
self.var_count.set(self.var_count.get() + 1)
self.save_to_file(self.selected_count)
def count_down(self):
if not self.selected_count:
messagebox.showerror("No Configuration Selected", "Please select a configuration.")
return
self.var_count.set(self.var_count.get() - 1)
self.save_to_file(self.selected_count)
def reset(self):
if not self.selected_count:
messagebox.showerror("No Configuration Selected", "Please select a configuration.")
return
choice = messagebox.askyesno("Reset", "Are you sure you want to reset the count?")
if choice:
self.var_count.set(0)
self.save_to_file(self.selected_count)
def save(self):
SaveCount(tk.Toplevel(), self.save_to_listbox)
def save_to_listbox(self, name: str):
if self.save_to_file(name): # If save is successful
self.lst_counts.insert(tk.END, name)
def save_to_file(self, name: str) -> bool:
try:
with open(join("data", name), "w") as file:
file.write(str(self.var_count.get()))
return True
except FileNotFoundError:
os.mkdir("data")
messagebox.showerror("Save Error", "No data folder was found; created one now.")
return False
    def delete_save(self):
        try:
            name_of_selected_count = self.lst_counts.get(int(self.lst_counts.curselection()[0]))
        except IndexError:
            return
        os.remove(join("data", name_of_selected_count))
        for i in range(self.lst_counts.size()):
            if self.lst_counts.get(i) == name_of_selected_count:
                self.lst_counts.delete(i)
                break  # names are unique; stop before the indices shift
def listbox_select(self, event):
widget = event.widget
try:
name_of_selected_count = widget.get(int(widget.curselection()[0]))
except IndexError:
return
try:
with open(join("data", name_of_selected_count), "r") as file:
count = int(file.read())
except FileNotFoundError:
os.mkdir("data")
messagebox.showerror("Save Error", "No data folder was found; created one now.")
return
self.var_count.set(count)
self.selected_count = name_of_selected_count
def right_click(self, event):
self.men_list.tk_popup(event.x_root, event.y_root)
def key_press(self, event):
if event.char == " ":
self.count_up()
def on_key_release(self, key):
if key == keyboard.KeyCode.from_char("+"):
self.count_up()
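# Hedged note (an assumption about threading, not stated in the source):
# pynput's keyboard.Listener delivers callbacks on its own thread, while
# Tkinter widgets must only be touched from the main loop's thread. A
# thread-safe variant of the hotkey handler would marshal the call back
# onto Tk's event loop:
#     def on_key_release(self, key):
#         if key == keyboard.KeyCode.from_char("+"):
#             self.root.after(0, self.count_up)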
def main():
    root = tk.Tk()
    MainApplication(root)
    root.mainloop()
if __name__ == "__main__":
    main()  # entry-point guard added so the script launches when run directly
|
normal
|
{
"blob_id": "7e2bf898eb1c0118205042797e6dac535342979b",
"index": 185,
"step-1": "<mask token>\n\n\nclass MainApplication(tk.Frame):\n <mask token>\n\n def count_up(self):\n if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n return\n elif not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n self.var_count.set(self.var_count.get() + 1)\n self.save_to_file(self.selected_count)\n\n def count_down(self):\n if not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n self.var_count.set(self.var_count.get() - 1)\n self.save_to_file(self.selected_count)\n <mask token>\n\n def save(self):\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n <mask token>\n\n def save_to_file(self, name: str) ->bool:\n try:\n with open(join('data', name), 'w') as file:\n file.write(str(self.var_count.get()))\n return True\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save Error',\n 'No data folder was found; created one now.')\n return False\n <mask token>\n <mask token>\n <mask token>\n\n def key_press(self, event):\n if event.char == ' ':\n self.count_up()\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MainApplication(tk.Frame):\n\n def __init__(self, root: tk.Tk):\n super().__init__(root)\n self.root = root\n self.pack(padx=32, pady=32, expand=True)\n self.root.option_add('*tearOff', False)\n self.root.title('Counter')\n frm_buttons = tk.Frame(self)\n frm_buttons.grid(row=0, column=0)\n self.var_count = tk.IntVar(frm_buttons)\n tk.Button(frm_buttons, textvariable=self.var_count, command=self.\n count_up, font='Times, 60').grid(row=0, column=0, columnspan=3,\n sticky=tk.NSEW)\n tk.Button(frm_buttons, text='-1', command=self.count_down).grid(row\n =1, column=0)\n tk.Button(frm_buttons, text='Reset', command=self.reset).grid(row=1,\n column=1)\n tk.Button(frm_buttons, text='Save', command=self.save).grid(row=1,\n column=2)\n self.selected_count = ''\n self.lst_counts = tk.Listbox(self)\n self.lst_counts.grid(row=0, column=1)\n self.lst_counts.bind('<<ListboxSelect>>', self.listbox_select)\n self.lst_counts.bind('<Button-3>', self.right_click)\n self.men_list = tk.Menu(self)\n self.men_list.add_command(label='Delete Selected', command=self.\n delete_save)\n self.root.bind('<Key>', self.key_press)\n try:\n saves = os.listdir('data')\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save Error',\n 'No data folder was found; created one now.')\n return\n for count_save in saves:\n self.lst_counts.insert(tk.END, count_save)\n listener = keyboard.Listener(on_release=self.on_key_release)\n listener.start()\n\n def count_up(self):\n if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n return\n elif not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n self.var_count.set(self.var_count.get() + 1)\n self.save_to_file(self.selected_count)\n\n def count_down(self):\n if not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n self.var_count.set(self.var_count.get() - 1)\n self.save_to_file(self.selected_count)\n\n def reset(self):\n if not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n choice = messagebox.askyesno('Reset',\n 'Are you sure you want to reset the count?')\n if choice:\n self.var_count.set(0)\n self.save_to_file(self.selected_count)\n\n def save(self):\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n <mask token>\n\n def save_to_file(self, name: str) ->bool:\n try:\n with open(join('data', name), 'w') as file:\n file.write(str(self.var_count.get()))\n return True\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save Error',\n 'No data folder was found; created one now.')\n return False\n <mask token>\n\n def listbox_select(self, event):\n widget = event.widget\n try:\n name_of_selected_count = widget.get(int(widget.curselection()[0]))\n except IndexError:\n return\n try:\n with open(join('data', name_of_selected_count), 'r') as file:\n count = int(file.read())\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save Error',\n 'No data folder was found; created one now.')\n return\n self.var_count.set(count)\n self.selected_count = name_of_selected_count\n <mask token>\n\n def key_press(self, event):\n if event.char == ' ':\n self.count_up()\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MainApplication(tk.Frame):\n\n def __init__(self, root: tk.Tk):\n super().__init__(root)\n self.root = root\n self.pack(padx=32, pady=32, expand=True)\n self.root.option_add('*tearOff', False)\n self.root.title('Counter')\n frm_buttons = tk.Frame(self)\n frm_buttons.grid(row=0, column=0)\n self.var_count = tk.IntVar(frm_buttons)\n tk.Button(frm_buttons, textvariable=self.var_count, command=self.\n count_up, font='Times, 60').grid(row=0, column=0, columnspan=3,\n sticky=tk.NSEW)\n tk.Button(frm_buttons, text='-1', command=self.count_down).grid(row\n =1, column=0)\n tk.Button(frm_buttons, text='Reset', command=self.reset).grid(row=1,\n column=1)\n tk.Button(frm_buttons, text='Save', command=self.save).grid(row=1,\n column=2)\n self.selected_count = ''\n self.lst_counts = tk.Listbox(self)\n self.lst_counts.grid(row=0, column=1)\n self.lst_counts.bind('<<ListboxSelect>>', self.listbox_select)\n self.lst_counts.bind('<Button-3>', self.right_click)\n self.men_list = tk.Menu(self)\n self.men_list.add_command(label='Delete Selected', command=self.\n delete_save)\n self.root.bind('<Key>', self.key_press)\n try:\n saves = os.listdir('data')\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save Error',\n 'No data folder was found; created one now.')\n return\n for count_save in saves:\n self.lst_counts.insert(tk.END, count_save)\n listener = keyboard.Listener(on_release=self.on_key_release)\n listener.start()\n\n def count_up(self):\n if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n return\n elif not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n self.var_count.set(self.var_count.get() + 1)\n self.save_to_file(self.selected_count)\n\n def count_down(self):\n if not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n self.var_count.set(self.var_count.get() - 1)\n self.save_to_file(self.selected_count)\n\n def reset(self):\n if not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n choice = messagebox.askyesno('Reset',\n 'Are you sure you want to reset the count?')\n if choice:\n self.var_count.set(0)\n self.save_to_file(self.selected_count)\n\n def save(self):\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n\n def save_to_listbox(self, name: str):\n if self.save_to_file(name):\n self.lst_counts.insert(tk.END, name)\n\n def save_to_file(self, name: str) ->bool:\n try:\n with open(join('data', name), 'w') as file:\n file.write(str(self.var_count.get()))\n return True\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save Error',\n 'No data folder was found; created one now.')\n return False\n\n def delete_save(self):\n try:\n name_of_selected_count = self.lst_counts.get(int(self.\n lst_counts.curselection()[0]))\n except IndexError:\n return\n os.remove(join('data', name_of_selected_count))\n for i in range(self.lst_counts.size()):\n if self.lst_counts.get(i) == name_of_selected_count:\n self.lst_counts.delete(i)\n\n def listbox_select(self, event):\n widget = event.widget\n try:\n name_of_selected_count = widget.get(int(widget.curselection()[0]))\n except IndexError:\n return\n try:\n with open(join('data', name_of_selected_count), 'r') as file:\n count = int(file.read())\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save 
Error',\n 'No data folder was found; created one now.')\n return\n self.var_count.set(count)\n self.selected_count = name_of_selected_count\n\n def right_click(self, event):\n self.men_list.tk_popup(event.x_root, event.y_root)\n\n def key_press(self, event):\n if event.char == ' ':\n self.count_up()\n\n def on_key_release(self, key):\n if key == keyboard.KeyCode.from_char('+'):\n self.count_up()\n\n\ndef main():\n root = tk.Tk()\n MainApplication(root)\n root.mainloop()\n",
"step-4": "import os\nimport tkinter as tk\nfrom tkinter import messagebox\nfrom os.path import join\nfrom pynput import keyboard\nfrom src.save_count import SaveCount\n\n\nclass MainApplication(tk.Frame):\n\n def __init__(self, root: tk.Tk):\n super().__init__(root)\n self.root = root\n self.pack(padx=32, pady=32, expand=True)\n self.root.option_add('*tearOff', False)\n self.root.title('Counter')\n frm_buttons = tk.Frame(self)\n frm_buttons.grid(row=0, column=0)\n self.var_count = tk.IntVar(frm_buttons)\n tk.Button(frm_buttons, textvariable=self.var_count, command=self.\n count_up, font='Times, 60').grid(row=0, column=0, columnspan=3,\n sticky=tk.NSEW)\n tk.Button(frm_buttons, text='-1', command=self.count_down).grid(row\n =1, column=0)\n tk.Button(frm_buttons, text='Reset', command=self.reset).grid(row=1,\n column=1)\n tk.Button(frm_buttons, text='Save', command=self.save).grid(row=1,\n column=2)\n self.selected_count = ''\n self.lst_counts = tk.Listbox(self)\n self.lst_counts.grid(row=0, column=1)\n self.lst_counts.bind('<<ListboxSelect>>', self.listbox_select)\n self.lst_counts.bind('<Button-3>', self.right_click)\n self.men_list = tk.Menu(self)\n self.men_list.add_command(label='Delete Selected', command=self.\n delete_save)\n self.root.bind('<Key>', self.key_press)\n try:\n saves = os.listdir('data')\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save Error',\n 'No data folder was found; created one now.')\n return\n for count_save in saves:\n self.lst_counts.insert(tk.END, count_save)\n listener = keyboard.Listener(on_release=self.on_key_release)\n listener.start()\n\n def count_up(self):\n if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n return\n elif not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n self.var_count.set(self.var_count.get() + 1)\n self.save_to_file(self.selected_count)\n\n def count_down(self):\n if not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n self.var_count.set(self.var_count.get() - 1)\n self.save_to_file(self.selected_count)\n\n def reset(self):\n if not self.selected_count:\n messagebox.showerror('No Configuration Selected',\n 'Please select a configuration.')\n return\n choice = messagebox.askyesno('Reset',\n 'Are you sure you want to reset the count?')\n if choice:\n self.var_count.set(0)\n self.save_to_file(self.selected_count)\n\n def save(self):\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n\n def save_to_listbox(self, name: str):\n if self.save_to_file(name):\n self.lst_counts.insert(tk.END, name)\n\n def save_to_file(self, name: str) ->bool:\n try:\n with open(join('data', name), 'w') as file:\n file.write(str(self.var_count.get()))\n return True\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save Error',\n 'No data folder was found; created one now.')\n return False\n\n def delete_save(self):\n try:\n name_of_selected_count = self.lst_counts.get(int(self.\n lst_counts.curselection()[0]))\n except IndexError:\n return\n os.remove(join('data', name_of_selected_count))\n for i in range(self.lst_counts.size()):\n if self.lst_counts.get(i) == name_of_selected_count:\n self.lst_counts.delete(i)\n\n def listbox_select(self, event):\n widget = event.widget\n try:\n name_of_selected_count = widget.get(int(widget.curselection()[0]))\n except IndexError:\n return\n try:\n with open(join('data', 
name_of_selected_count), 'r') as file:\n count = int(file.read())\n except FileNotFoundError:\n os.mkdir('data')\n messagebox.showerror('Save Error',\n 'No data folder was found; created one now.')\n return\n self.var_count.set(count)\n self.selected_count = name_of_selected_count\n\n def right_click(self, event):\n self.men_list.tk_popup(event.x_root, event.y_root)\n\n def key_press(self, event):\n if event.char == ' ':\n self.count_up()\n\n def on_key_release(self, key):\n if key == keyboard.KeyCode.from_char('+'):\n self.count_up()\n\n\ndef main():\n root = tk.Tk()\n MainApplication(root)\n root.mainloop()\n",
"step-5": "import os\nimport tkinter as tk\nfrom tkinter import messagebox\nfrom os.path import join\n\nfrom pynput import keyboard\n\nfrom src.save_count import SaveCount\n\n\nclass MainApplication(tk.Frame):\n\n def __init__(self, root: tk.Tk):\n super().__init__(root)\n self.root = root\n self.pack(padx=32, pady=32, expand=True)\n\n self.root.option_add(\"*tearOff\", False)\n self.root.title(\"Counter\")\n\n frm_buttons = tk.Frame(self)\n frm_buttons.grid(row=0, column=0)\n\n self.var_count = tk.IntVar(frm_buttons)\n\n tk.Button(frm_buttons, textvariable=self.var_count, command=self.count_up, font=\"Times, 60\") \\\n .grid(row=0, column=0, columnspan=3, sticky=tk.NSEW)\n\n tk.Button(frm_buttons, text=\"-1\", command=self.count_down).grid(row=1, column=0)\n tk.Button(frm_buttons, text=\"Reset\", command=self.reset).grid(row=1, column=1)\n tk.Button(frm_buttons, text=\"Save\", command=self.save).grid(row=1, column=2)\n\n # tk.Button(frm_buttons, text=\"Undecorated Window\", command=None).grid(row=2, column=0, columnspan=3)\n\n self.selected_count = \"\"\n\n self.lst_counts = tk.Listbox(self)\n self.lst_counts.grid(row=0, column=1)\n self.lst_counts.bind(\"<<ListboxSelect>>\", self.listbox_select)\n self.lst_counts.bind(\"<Button-3>\", self.right_click)\n\n self.men_list = tk.Menu(self)\n self.men_list.add_command(label=\"Delete Selected\", command=self.delete_save)\n\n self.root.bind(\"<Key>\", self.key_press)\n\n try:\n saves = os.listdir(\"data\")\n except FileNotFoundError:\n os.mkdir(\"data\")\n messagebox.showerror(\"Save Error\", \"No data folder was found; created one now.\")\n return\n for count_save in saves:\n self.lst_counts.insert(tk.END, count_save)\n\n listener = keyboard.Listener(on_release=self.on_key_release)\n listener.start()\n\n def count_up(self):\n # Save to entry, if it's the first one\n if self.var_count.get() == 0 and self.lst_counts.index(tk.END) == 0:\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n return\n else:\n if not self.selected_count:\n messagebox.showerror(\"No Configuration Selected\", \"Please select a configuration.\")\n return\n\n self.var_count.set(self.var_count.get() + 1)\n self.save_to_file(self.selected_count)\n\n def count_down(self):\n if not self.selected_count:\n messagebox.showerror(\"No Configuration Selected\", \"Please select a configuration.\")\n return\n\n self.var_count.set(self.var_count.get() - 1)\n self.save_to_file(self.selected_count)\n\n def reset(self):\n if not self.selected_count:\n messagebox.showerror(\"No Configuration Selected\", \"Please select a configuration.\")\n return\n\n choice = messagebox.askyesno(\"Reset\", \"Are you sure you want to reset the count?\")\n if choice:\n self.var_count.set(0)\n self.save_to_file(self.selected_count)\n\n def save(self):\n SaveCount(tk.Toplevel(), self.save_to_listbox)\n\n def save_to_listbox(self, name: str):\n if self.save_to_file(name): # If save is successful\n self.lst_counts.insert(tk.END, name)\n\n def save_to_file(self, name: str) -> bool:\n try:\n with open(join(\"data\", name), \"w\") as file:\n file.write(str(self.var_count.get()))\n return True\n except FileNotFoundError:\n os.mkdir(\"data\")\n messagebox.showerror(\"Save Error\", \"No data folder was found; created one now.\")\n return False\n\n def delete_save(self):\n try:\n name_of_selected_count = self.lst_counts.get(int(self.lst_counts.curselection()[0]))\n except IndexError:\n return\n\n os.remove(join(\"data\", name_of_selected_count))\n\n for i in range(self.lst_counts.size()):\n if self.lst_counts.get(i) == 
name_of_selected_count:\n self.lst_counts.delete(i)\n\n def listbox_select(self, event):\n widget = event.widget\n try:\n name_of_selected_count = widget.get(int(widget.curselection()[0]))\n except IndexError:\n return\n\n try:\n with open(join(\"data\", name_of_selected_count), \"r\") as file:\n count = int(file.read())\n except FileNotFoundError:\n os.mkdir(\"data\")\n messagebox.showerror(\"Save Error\", \"No data folder was found; created one now.\")\n return\n\n self.var_count.set(count)\n self.selected_count = name_of_selected_count\n\n def right_click(self, event):\n self.men_list.tk_popup(event.x_root, event.y_root)\n\n def key_press(self, event):\n if event.char == \" \":\n self.count_up()\n\n def on_key_release(self, key):\n if key == keyboard.KeyCode.from_char(\"+\"):\n self.count_up()\n\n\ndef main():\n root = tk.Tk()\n MainApplication(root)\n root.mainloop()\n",
"step-ids": [
6,
9,
14,
15,
16
]
}
|
[
6,
9,
14,
15,
16
] |
# models.py

from typing import List  # explicit import for the List type hints below (sentiment_data may already re-export it)

from sentiment_data import *
from utils import *
import nltk
from nltk.corpus import stopwords
import numpy as np
from scipy.sparse import csr_matrix
class FeatureExtractor(object):
"""
Feature extraction base type. Takes a sentence and returns an indexed list of features.
"""
def get_indexer(self):
raise Exception("Don't call me, call my subclasses")
def extract_features(self, ex_words: List[str], add_to_indexer: bool=False) -> List[int]:
"""
        Extract features from a sentence represented as a list of words. Includes a flag add_to_indexer to
        control whether previously unseen features should be added to the indexer.
:param ex_words: words in the example to featurize
:param add_to_indexer: True if we should grow the dimensionality of the featurizer if new features are encountered.
At test time, any unseen features should be discarded, but at train time, we probably want to keep growing it.
:return:
"""
raise Exception("Don't call me, call my subclasses")
class UnigramFeatureExtractor(FeatureExtractor):
"""
Extracts unigram bag-of-words features from a sentence. It's up to you to decide how you want to handle counts
and any additional preprocessing you want to do.
"""
def __init__(self, indexer: Indexer, train_exs, stop_words):
for sentimentExample in train_exs:
words = sentimentExample.words
for word in words:
lowercase = word.lower()
                if lowercase not in stop_words:
indexer.add_and_get_index(lowercase)
self.indexer = indexer
self.corpus_length = len(indexer)
self.feats = []
for i, sentimentExample in enumerate(train_exs):
sentence = sentimentExample.words
self.feats.append(self.calculate_sentence_probability(sentence))
def calculate_sentence_probability(self, sentence):
col = [self.indexer.index_of(word.lower()) for word in sentence if self.indexer.contains(word.lower())]
        row = np.zeros(len(col), dtype=int)  # np.int was removed in NumPy 1.24; use the builtin int
        data = np.ones(len(col), dtype=int)
feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))
if len(col) > 0:
feat = feat * (1. / len(col))
return feat
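
# Illustrative sketch (hypothetical vocabulary): if the indexer maps {"good": 0, "movie": 1},
# then calculate_sentence_probability(["a", "good", "movie"]) yields a 1 x corpus_length sparse
# row with value 1/2 at columns 0 and 1 -- indicator counts normalized by the number of
# in-vocabulary tokens ("a" is skipped if it was filtered as a stop word at train time).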
class BigramFeatureExtractor(FeatureExtractor):
"""
Bigram feature extractor analogous to the unigram one.
"""
def __init__(self, indexer: Indexer, train_exs, stop_words):
for sentimentExample in train_exs:
words = sentimentExample.words
previous_word = None
for word in words:
if previous_word is not None:
if not (previous_word.lower() in stop_words and word.lower() in stop_words):
indexer.add_and_get_index((previous_word.lower(), word.lower()))
previous_word = word
self.indexer = indexer
self.corpus_length = len(indexer)
self.feats = []
for i, sentimentExample in enumerate(train_exs):
sentence = sentimentExample.words
self.feats.append(self.calculate_sentence_probability(sentence))
def calculate_sentence_probability(self, sentence):
col = []
previous_word = None
for word in sentence:
if previous_word is not None:
if self.indexer.contains((previous_word.lower(), word.lower())):
col.append(self.indexer.index_of((previous_word.lower(), word.lower())))
previous_word = word
        row = np.zeros(len(col), dtype=int)  # builtin int: np.int is gone in NumPy >= 1.24
        data = np.ones(len(col), dtype=int)
feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))
if len(col) > 0:
feat = feat * (1. / len(col))
return feat
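
# Sketch: for ["not", "good", "at", "all"], the candidate bigram keys are ("not", "good"),
# ("good", "at"), and ("at", "all"); a pair contributes a column only if it was indexed at
# train time, and the resulting sparse row is normalized by the number of matched bigrams.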
class BetterFeatureExtractor(FeatureExtractor):
"""
Better feature extractor...try whatever you can think of!
"""
def __init__(self, indexer: Indexer, train_exs, stop_words):
# unigram
for sentimentExample in train_exs:
words = sentimentExample.words
for word in words:
lowercase = word.lower()
                if lowercase not in stop_words:
indexer.add_and_get_index(lowercase)
# bigram
for sentimentExample in train_exs:
words = sentimentExample.words
previous_word = None
for word in words:
if previous_word is not None:
if not (previous_word.lower() in stop_words and word.lower() in stop_words):
indexer.add_and_get_index((previous_word.lower(), word.lower()))
previous_word = word
self.indexer = indexer
self.corpus_length = len(indexer)
self.feats = []
for i, sentimentExample in enumerate(train_exs):
sentence = sentimentExample.words
self.feats.append(self.calculate_sentence_probability(sentence))
def calculate_sentence_probability(self, sentence):
col = [self.indexer.index_of(word.lower()) for word in sentence if self.indexer.contains(word.lower())]
unigram_count = len(col)
previous_word = None
for word in sentence:
if previous_word is not None:
if self.indexer.contains((previous_word.lower(), word.lower())):
col.append(self.indexer.index_of((previous_word.lower(), word.lower())))
previous_word = word
bigram_count = len(col) - unigram_count
        row = np.zeros(len(col), dtype=int)
        data = np.ones(len(col))
        # Normalize each block separately so the unigram and bigram features each sum to 1;
        # guard the divisions so a sentence with no matches in a block cannot divide by zero.
        if unigram_count > 0:
            data[:unigram_count] = data[:unigram_count] * 1. / unigram_count
        if bigram_count > 0:
            data[unigram_count:unigram_count + bigram_count] = data[unigram_count:unigram_count + bigram_count] * 1. / bigram_count
feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))
return feat
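
# The combined representation is thus a concatenation (in index space) of the unigram and
# bigram bags-of-words, with each block L1-normalized so neither dominates purely by count.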
class SentimentClassifier(object):
"""
Sentiment classifier base type
"""
def predict(self, ex_words: List[str]) -> int:
"""
:param ex_words: words (List[str]) in the sentence to classify
:return: Either 0 for negative class or 1 for positive class
"""
raise Exception("Don't call me, call my subclasses")
class TrivialSentimentClassifier(SentimentClassifier):
"""
Sentiment classifier that always predicts the positive class.
"""
def predict(self, ex_words: List[str]) -> int:
return 1
class PerceptronClassifier(SentimentClassifier):
"""
Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier
superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to
modify the constructor to pass these in.
"""
def __init__(self):
raise Exception("Must be implemented")
class LogisticRegressionClassifier(SentimentClassifier):
"""
Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier
superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to
modify the constructor to pass these in.
"""
def __init__(self, feat_size, feat_extractor):
self.w = np.zeros(feat_size)
self.feat_extractor = feat_extractor
def predict(self, sentence):
feat = self.feat_extractor.calculate_sentence_probability(sentence)
return int(feat.dot(np.expand_dims(self.w, axis=1))[0, 0] > 0)
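
# Note: thresholding the raw score dot(w, x) at 0 is equivalent to thresholding
# sigmoid(dot(w, x)) = 1 / (1 + exp(-dot(w, x))) at 0.5, so predict() agrees with the
# probabilistic model used during training.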
def train_perceptron(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> PerceptronClassifier:
"""
Train a classifier with the perceptron.
:param train_exs: training set, List of SentimentExample objects
:param feat_extractor: feature extractor to use
:return: trained PerceptronClassifier model
"""
raise Exception("Must be implemented")
def train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:
"""
Train a logistic regression model.
:param train_exs: training set, List of SentimentExample objects
:param feat_extractor: feature extractor to use
:return: trained LogisticRegressionClassifier model
"""
lr = LogisticRegressionClassifier(feat_extractor.corpus_length, feat_extractor)
    alpha = 1e0  # learning rate
    # beta = 1e-4  # optional L2 regularization strength (currently disabled)
for epoch in range(8):
loss = 0.
acc = 0
indices = np.arange(len(train_exs))
np.random.shuffle(indices)
for i in indices:
feat = feat_extractor.feats[i]
sentimentExample = train_exs[i]
y = sentimentExample.label
z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]
            loss += -y * np.log(z) - (1 - y) * np.log(1 - z)
            # Optional L2 term (disabled): + beta * np.expand_dims(lr.w, axis=0).dot(np.expand_dims(lr.w, axis=1))[0, 0]
predict = int(feat.dot(np.expand_dims(lr.w, axis=1))[0, 0] > 0)
acc += (predict == y)
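            # Per-example gradient: d/dw of the logistic loss
            # -y*log(sigmoid(w.x)) - (1-y)*log(1 - sigmoid(w.x)) is (sigmoid(w.x) - y) * x = (z - y) * x.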
grad = (z - y) * feat.toarray()[0] # + 2 * beta * lr.w
lr.w = lr.w - alpha * grad
print("epoch {:d}, loss: {:f}, accuracy: {:f}".format(epoch, loss / len(train_exs), acc / len(train_exs)))
    # Recompute the final training loss from scratch; without resetting the accumulator,
    # the last epoch's loss would be double-counted in the figure printed below.
    loss = 0.
    for i in indices:
        feat = feat_extractor.feats[i]
        sentimentExample = train_exs[i]
        y = sentimentExample.label
        z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]
        loss += -y * np.log(z) - (1 - y) * np.log(1 - z)
    print("training loss: {:f}".format(loss / len(train_exs)))
return lr
def train_model(args, train_exs: List[SentimentExample]) -> SentimentClassifier:
"""
Main entry point for your modifications. Trains and returns one of several models depending on the args
passed in from the main method. You may modify this function, but probably will not need to.
:param args: args bundle from sentiment_classifier.py
:param train_exs: training set, List of SentimentExample objects
:return: trained SentimentClassifier model, of whichever type is specified
"""
# Initialize feature extractor
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
if args.model == "TRIVIAL":
feat_extractor = None
elif args.feats == "UNIGRAM":
feat_extractor = UnigramFeatureExtractor(Indexer(), train_exs, stop_words)
elif args.feats == "BIGRAM":
# Add additional preprocessing code here
feat_extractor = BigramFeatureExtractor(Indexer(), train_exs, stop_words)
elif args.feats == "BETTER":
# Add additional preprocessing code here
feat_extractor = BetterFeatureExtractor(Indexer(), train_exs, stop_words)
else:
raise Exception("Pass in UNIGRAM, BIGRAM, or BETTER to run the appropriate system")
# Train the model
if args.model == "TRIVIAL":
model = TrivialSentimentClassifier()
elif args.model == "PERCEPTRON":
model = train_perceptron(train_exs, feat_extractor)
elif args.model == "LR":
model = train_logistic_regression(train_exs, feat_extractor)
else:
raise Exception("Pass in TRIVIAL, PERCEPTRON, or LR to run the appropriate system")
return model
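

# Minimal smoke test -- a hedged sketch, not part of the assignment harness. It assumes
# SentimentExample(words, label) and Indexer() behave as provided by sentiment_data / utils;
# the toy sentences, labels, and the empty stop-word set are illustrative only.
if __name__ == "__main__":
    toy_exs = [SentimentExample(["a", "great", "great", "film"], 1),
               SentimentExample(["a", "dull", "boring", "film"], 0)]
    extractor = UnigramFeatureExtractor(Indexer(), toy_exs, set())
    clf = train_logistic_regression(toy_exs, extractor)
    print(clf.predict(["great", "film"]))  # expected to print 1 once training separates the toy data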
|
normal
|
{
"blob_id": "5d8d47d77fba9027d7c5ec4e672fc0c597b76eae",
"index": 4091,
"step-1": "<mask token>\n\n\nclass UnigramFeatureExtractor(FeatureExtractor):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Bigram feature extractor analogous to the unigram one.\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.\n lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(),\n word.lower()))\n previous_word = word\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = []\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())\n ):\n col.append(self.indexer.index_of((previous_word.lower(),\n word.lower())))\n previous_word = word\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col), dtype=np.int)\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n if len(col) > 0:\n feat = feat * (1.0 / len(col))\n return feat\n\n\nclass BetterFeatureExtractor(FeatureExtractor):\n \"\"\"\n Better feature extractor...try whatever you can think of!\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n for word in words:\n lowercase = word.lower()\n if not lowercase in stop_words:\n indexer.add_and_get_index(lowercase)\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.\n lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(),\n word.lower()))\n previous_word = word\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = [self.indexer.index_of(word.lower()) for word in sentence if\n self.indexer.contains(word.lower())]\n unigram_count = len(col)\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())\n ):\n col.append(self.indexer.index_of((previous_word.lower(),\n word.lower())))\n previous_word = word\n bigram_count = len(col) - unigram_count\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col))\n data[:unigram_count] = data[:unigram_count] * 1.0 / unigram_count\n data[unigram_count:unigram_count + bigram_count] = data[unigram_count\n :unigram_count + bigram_count] * 1.0 / bigram_count\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n return feat\n\n\nclass SentimentClassifier(object):\n \"\"\"\n Sentiment classifier base type\n \"\"\"\n\n def predict(self, ex_words: List[str]) ->int:\n \"\"\"\n :param ex_words: words (List[str]) in the sentence to classify\n :return: Either 0 for negative class or 1 for positive class\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass 
TrivialSentimentClassifier(SentimentClassifier):\n \"\"\"\n Sentiment classifier that always predicts the positive class.\n \"\"\"\n\n def predict(self, ex_words: List[str]) ->int:\n return 1\n\n\nclass PerceptronClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n\n def __init__(self):\n raise Exception('Must be implemented')\n\n\nclass LogisticRegressionClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n\n def __init__(self, feat_size, feat_extractor):\n self.w = np.zeros(feat_size)\n self.feat_extractor = feat_extractor\n\n def predict(self, sentence):\n feat = self.feat_extractor.calculate_sentence_probability(sentence)\n return int(feat.dot(np.expand_dims(self.w, axis=1))[0, 0] > 0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FeatureExtractor(object):\n \"\"\"\n Feature extraction base type. Takes a sentence and returns an indexed list of features.\n \"\"\"\n\n def get_indexer(self):\n raise Exception(\"Don't call me, call my subclasses\")\n\n def extract_features(self, ex_words: List[str], add_to_indexer: bool=False\n ) ->List[int]:\n \"\"\"\n Extract features from a sentence represented as a list of words. Includes a flag add_to_indexer to\n :param ex_words: words in the example to featurize\n :param add_to_indexer: True if we should grow the dimensionality of the featurizer if new features are encountered.\n At test time, any unseen features should be discarded, but at train time, we probably want to keep growing it.\n :return:\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass UnigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Extracts unigram bag-of-words features from a sentence. It's up to you to decide how you want to handle counts\n and any additional preprocessing you want to do.\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n for word in words:\n lowercase = word.lower()\n if not lowercase in stop_words:\n indexer.add_and_get_index(lowercase)\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = [self.indexer.index_of(word.lower()) for word in sentence if\n self.indexer.contains(word.lower())]\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col), dtype=np.int)\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n if len(col) > 0:\n feat = feat * (1.0 / len(col))\n return feat\n\n\nclass BigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Bigram feature extractor analogous to the unigram one.\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.\n lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(),\n word.lower()))\n previous_word = word\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = []\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())\n ):\n col.append(self.indexer.index_of((previous_word.lower(),\n word.lower())))\n previous_word = word\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col), dtype=np.int)\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n if len(col) > 0:\n feat = feat * (1.0 / len(col))\n return feat\n\n\nclass BetterFeatureExtractor(FeatureExtractor):\n \"\"\"\n Better feature extractor...try whatever you can think of!\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n for word in words:\n lowercase = word.lower()\n if not lowercase in 
stop_words:\n indexer.add_and_get_index(lowercase)\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.\n lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(),\n word.lower()))\n previous_word = word\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = [self.indexer.index_of(word.lower()) for word in sentence if\n self.indexer.contains(word.lower())]\n unigram_count = len(col)\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())\n ):\n col.append(self.indexer.index_of((previous_word.lower(),\n word.lower())))\n previous_word = word\n bigram_count = len(col) - unigram_count\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col))\n data[:unigram_count] = data[:unigram_count] * 1.0 / unigram_count\n data[unigram_count:unigram_count + bigram_count] = data[unigram_count\n :unigram_count + bigram_count] * 1.0 / bigram_count\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n return feat\n\n\nclass SentimentClassifier(object):\n \"\"\"\n Sentiment classifier base type\n \"\"\"\n\n def predict(self, ex_words: List[str]) ->int:\n \"\"\"\n :param ex_words: words (List[str]) in the sentence to classify\n :return: Either 0 for negative class or 1 for positive class\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass TrivialSentimentClassifier(SentimentClassifier):\n \"\"\"\n Sentiment classifier that always predicts the positive class.\n \"\"\"\n\n def predict(self, ex_words: List[str]) ->int:\n return 1\n\n\nclass PerceptronClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n\n def __init__(self):\n raise Exception('Must be implemented')\n\n\nclass LogisticRegressionClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n\n def __init__(self, feat_size, feat_extractor):\n self.w = np.zeros(feat_size)\n self.feat_extractor = feat_extractor\n\n def predict(self, sentence):\n feat = self.feat_extractor.calculate_sentence_probability(sentence)\n return int(feat.dot(np.expand_dims(self.w, axis=1))[0, 0] > 0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FeatureExtractor(object):\n \"\"\"\n Feature extraction base type. Takes a sentence and returns an indexed list of features.\n \"\"\"\n\n def get_indexer(self):\n raise Exception(\"Don't call me, call my subclasses\")\n\n def extract_features(self, ex_words: List[str], add_to_indexer: bool=False\n ) ->List[int]:\n \"\"\"\n Extract features from a sentence represented as a list of words. Includes a flag add_to_indexer to\n :param ex_words: words in the example to featurize\n :param add_to_indexer: True if we should grow the dimensionality of the featurizer if new features are encountered.\n At test time, any unseen features should be discarded, but at train time, we probably want to keep growing it.\n :return:\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass UnigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Extracts unigram bag-of-words features from a sentence. It's up to you to decide how you want to handle counts\n and any additional preprocessing you want to do.\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n for word in words:\n lowercase = word.lower()\n if not lowercase in stop_words:\n indexer.add_and_get_index(lowercase)\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = [self.indexer.index_of(word.lower()) for word in sentence if\n self.indexer.contains(word.lower())]\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col), dtype=np.int)\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n if len(col) > 0:\n feat = feat * (1.0 / len(col))\n return feat\n\n\nclass BigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Bigram feature extractor analogous to the unigram one.\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.\n lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(),\n word.lower()))\n previous_word = word\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = []\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())\n ):\n col.append(self.indexer.index_of((previous_word.lower(),\n word.lower())))\n previous_word = word\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col), dtype=np.int)\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n if len(col) > 0:\n feat = feat * (1.0 / len(col))\n return feat\n\n\nclass BetterFeatureExtractor(FeatureExtractor):\n \"\"\"\n Better feature extractor...try whatever you can think of!\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n for word in words:\n lowercase = word.lower()\n if not lowercase in 
stop_words:\n indexer.add_and_get_index(lowercase)\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.\n lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(),\n word.lower()))\n previous_word = word\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = [self.indexer.index_of(word.lower()) for word in sentence if\n self.indexer.contains(word.lower())]\n unigram_count = len(col)\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())\n ):\n col.append(self.indexer.index_of((previous_word.lower(),\n word.lower())))\n previous_word = word\n bigram_count = len(col) - unigram_count\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col))\n data[:unigram_count] = data[:unigram_count] * 1.0 / unigram_count\n data[unigram_count:unigram_count + bigram_count] = data[unigram_count\n :unigram_count + bigram_count] * 1.0 / bigram_count\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n return feat\n\n\nclass SentimentClassifier(object):\n \"\"\"\n Sentiment classifier base type\n \"\"\"\n\n def predict(self, ex_words: List[str]) ->int:\n \"\"\"\n :param ex_words: words (List[str]) in the sentence to classify\n :return: Either 0 for negative class or 1 for positive class\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass TrivialSentimentClassifier(SentimentClassifier):\n \"\"\"\n Sentiment classifier that always predicts the positive class.\n \"\"\"\n\n def predict(self, ex_words: List[str]) ->int:\n return 1\n\n\nclass PerceptronClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n\n def __init__(self):\n raise Exception('Must be implemented')\n\n\nclass LogisticRegressionClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n\n def __init__(self, feat_size, feat_extractor):\n self.w = np.zeros(feat_size)\n self.feat_extractor = feat_extractor\n\n def predict(self, sentence):\n feat = self.feat_extractor.calculate_sentence_probability(sentence)\n return int(feat.dot(np.expand_dims(self.w, axis=1))[0, 0] > 0)\n\n\n<mask token>\n\n\ndef train_model(args, train_exs: List[SentimentExample]) ->SentimentClassifier:\n \"\"\"\n Main entry point for your modifications. Trains and returns one of several models depending on the args\n passed in from the main method. 
You may modify this function, but probably will not need to.\n :param args: args bundle from sentiment_classifier.py\n :param train_exs: training set, List of SentimentExample objects\n :return: trained SentimentClassifier model, of whichever type is specified\n \"\"\"\n nltk.download('stopwords')\n stop_words = set(stopwords.words('english'))\n if args.model == 'TRIVIAL':\n feat_extractor = None\n elif args.feats == 'UNIGRAM':\n feat_extractor = UnigramFeatureExtractor(Indexer(), train_exs,\n stop_words)\n elif args.feats == 'BIGRAM':\n feat_extractor = BigramFeatureExtractor(Indexer(), train_exs,\n stop_words)\n elif args.feats == 'BETTER':\n feat_extractor = BetterFeatureExtractor(Indexer(), train_exs,\n stop_words)\n else:\n raise Exception(\n 'Pass in UNIGRAM, BIGRAM, or BETTER to run the appropriate system')\n if args.model == 'TRIVIAL':\n model = TrivialSentimentClassifier()\n elif args.model == 'PERCEPTRON':\n model = train_perceptron(train_exs, feat_extractor)\n elif args.model == 'LR':\n model = train_logistic_regression(train_exs, feat_extractor)\n else:\n raise Exception(\n 'Pass in TRIVIAL, PERCEPTRON, or LR to run the appropriate system')\n return model\n",
"step-4": "<mask token>\n\n\nclass FeatureExtractor(object):\n \"\"\"\n Feature extraction base type. Takes a sentence and returns an indexed list of features.\n \"\"\"\n\n def get_indexer(self):\n raise Exception(\"Don't call me, call my subclasses\")\n\n def extract_features(self, ex_words: List[str], add_to_indexer: bool=False\n ) ->List[int]:\n \"\"\"\n Extract features from a sentence represented as a list of words. Includes a flag add_to_indexer to\n :param ex_words: words in the example to featurize\n :param add_to_indexer: True if we should grow the dimensionality of the featurizer if new features are encountered.\n At test time, any unseen features should be discarded, but at train time, we probably want to keep growing it.\n :return:\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass UnigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Extracts unigram bag-of-words features from a sentence. It's up to you to decide how you want to handle counts\n and any additional preprocessing you want to do.\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n for word in words:\n lowercase = word.lower()\n if not lowercase in stop_words:\n indexer.add_and_get_index(lowercase)\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = [self.indexer.index_of(word.lower()) for word in sentence if\n self.indexer.contains(word.lower())]\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col), dtype=np.int)\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n if len(col) > 0:\n feat = feat * (1.0 / len(col))\n return feat\n\n\nclass BigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Bigram feature extractor analogous to the unigram one.\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.\n lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(),\n word.lower()))\n previous_word = word\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = []\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())\n ):\n col.append(self.indexer.index_of((previous_word.lower(),\n word.lower())))\n previous_word = word\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col), dtype=np.int)\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n if len(col) > 0:\n feat = feat * (1.0 / len(col))\n return feat\n\n\nclass BetterFeatureExtractor(FeatureExtractor):\n \"\"\"\n Better feature extractor...try whatever you can think of!\n \"\"\"\n\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n for word in words:\n lowercase = word.lower()\n if not lowercase in 
stop_words:\n indexer.add_and_get_index(lowercase)\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.\n lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(),\n word.lower()))\n previous_word = word\n self.indexer = indexer\n self.corpus_length = len(indexer)\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = [self.indexer.index_of(word.lower()) for word in sentence if\n self.indexer.contains(word.lower())]\n unigram_count = len(col)\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())\n ):\n col.append(self.indexer.index_of((previous_word.lower(),\n word.lower())))\n previous_word = word\n bigram_count = len(col) - unigram_count\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col))\n data[:unigram_count] = data[:unigram_count] * 1.0 / unigram_count\n data[unigram_count:unigram_count + bigram_count] = data[unigram_count\n :unigram_count + bigram_count] * 1.0 / bigram_count\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n return feat\n\n\nclass SentimentClassifier(object):\n \"\"\"\n Sentiment classifier base type\n \"\"\"\n\n def predict(self, ex_words: List[str]) ->int:\n \"\"\"\n :param ex_words: words (List[str]) in the sentence to classify\n :return: Either 0 for negative class or 1 for positive class\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass TrivialSentimentClassifier(SentimentClassifier):\n \"\"\"\n Sentiment classifier that always predicts the positive class.\n \"\"\"\n\n def predict(self, ex_words: List[str]) ->int:\n return 1\n\n\nclass PerceptronClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n\n def __init__(self):\n raise Exception('Must be implemented')\n\n\nclass LogisticRegressionClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. 
Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n\n def __init__(self, feat_size, feat_extractor):\n self.w = np.zeros(feat_size)\n self.feat_extractor = feat_extractor\n\n def predict(self, sentence):\n feat = self.feat_extractor.calculate_sentence_probability(sentence)\n return int(feat.dot(np.expand_dims(self.w, axis=1))[0, 0] > 0)\n\n\n<mask token>\n\n\ndef train_logistic_regression(train_exs: List[SentimentExample],\n feat_extractor: FeatureExtractor) ->LogisticRegressionClassifier:\n \"\"\"\n Train a logistic regression model.\n :param train_exs: training set, List of SentimentExample objects\n :param feat_extractor: feature extractor to use\n :return: trained LogisticRegressionClassifier model\n \"\"\"\n lr = LogisticRegressionClassifier(feat_extractor.corpus_length,\n feat_extractor)\n alpha = 1.0\n for epoch in range(8):\n loss = 0.0\n acc = 0\n indices = np.arange(len(train_exs))\n np.random.shuffle(indices)\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z)\n predict = int(feat.dot(np.expand_dims(lr.w, axis=1))[0, 0] > 0)\n acc += predict == y\n grad = (z - y) * feat.toarray()[0]\n lr.w = lr.w - alpha * grad\n print('epoch {:d}, loss: {:f}, accuracy: {:f}'.format(epoch, loss /\n len(train_exs), acc / len(train_exs)))\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z)\n print('training loss: {:f}'.format(loss / len(train_exs)))\n return lr\n\n\ndef train_model(args, train_exs: List[SentimentExample]) ->SentimentClassifier:\n \"\"\"\n Main entry point for your modifications. Trains and returns one of several models depending on the args\n passed in from the main method. You may modify this function, but probably will not need to.\n :param args: args bundle from sentiment_classifier.py\n :param train_exs: training set, List of SentimentExample objects\n :return: trained SentimentClassifier model, of whichever type is specified\n \"\"\"\n nltk.download('stopwords')\n stop_words = set(stopwords.words('english'))\n if args.model == 'TRIVIAL':\n feat_extractor = None\n elif args.feats == 'UNIGRAM':\n feat_extractor = UnigramFeatureExtractor(Indexer(), train_exs,\n stop_words)\n elif args.feats == 'BIGRAM':\n feat_extractor = BigramFeatureExtractor(Indexer(), train_exs,\n stop_words)\n elif args.feats == 'BETTER':\n feat_extractor = BetterFeatureExtractor(Indexer(), train_exs,\n stop_words)\n else:\n raise Exception(\n 'Pass in UNIGRAM, BIGRAM, or BETTER to run the appropriate system')\n if args.model == 'TRIVIAL':\n model = TrivialSentimentClassifier()\n elif args.model == 'PERCEPTRON':\n model = train_perceptron(train_exs, feat_extractor)\n elif args.model == 'LR':\n model = train_logistic_regression(train_exs, feat_extractor)\n else:\n raise Exception(\n 'Pass in TRIVIAL, PERCEPTRON, or LR to run the appropriate system')\n return model\n",
"step-5": "# models.py\n\nfrom sentiment_data import *\nfrom utils import *\nimport nltk\nfrom nltk.corpus import stopwords\nimport numpy as np\nfrom scipy.sparse import csr_matrix\n\nclass FeatureExtractor(object):\n \"\"\"\n Feature extraction base type. Takes a sentence and returns an indexed list of features.\n \"\"\"\n def get_indexer(self):\n raise Exception(\"Don't call me, call my subclasses\")\n\n def extract_features(self, ex_words: List[str], add_to_indexer: bool=False) -> List[int]:\n \"\"\"\n Extract features from a sentence represented as a list of words. Includes a flag add_to_indexer to\n :param ex_words: words in the example to featurize\n :param add_to_indexer: True if we should grow the dimensionality of the featurizer if new features are encountered.\n At test time, any unseen features should be discarded, but at train time, we probably want to keep growing it.\n :return:\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass UnigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Extracts unigram bag-of-words features from a sentence. It's up to you to decide how you want to handle counts\n and any additional preprocessing you want to do.\n \"\"\"\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n for word in words:\n lowercase = word.lower()\n if not lowercase in stop_words:\n indexer.add_and_get_index(lowercase)\n self.indexer = indexer\n self.corpus_length = len(indexer)\n\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = [self.indexer.index_of(word.lower()) for word in sentence if self.indexer.contains(word.lower())]\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col), dtype=np.int)\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n if len(col) > 0:\n feat = feat * (1. / len(col))\n return feat\n\nclass BigramFeatureExtractor(FeatureExtractor):\n \"\"\"\n Bigram feature extractor analogous to the unigram one.\n \"\"\"\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(), word.lower()))\n previous_word = word\n self.indexer = indexer\n self.corpus_length = len(indexer)\n\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = []\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())):\n col.append(self.indexer.index_of((previous_word.lower(), word.lower())))\n previous_word = word\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col), dtype=np.int)\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n if len(col) > 0:\n feat = feat * (1. 
/ len(col))\n return feat\n\nclass BetterFeatureExtractor(FeatureExtractor):\n \"\"\"\n Better feature extractor...try whatever you can think of!\n \"\"\"\n def __init__(self, indexer: Indexer, train_exs, stop_words):\n # unigram\n for sentimentExample in train_exs:\n words = sentimentExample.words\n for word in words:\n lowercase = word.lower()\n if not lowercase in stop_words:\n indexer.add_and_get_index(lowercase)\n\n # bigram\n for sentimentExample in train_exs:\n words = sentimentExample.words\n previous_word = None\n for word in words:\n if previous_word is not None:\n if not (previous_word.lower() in stop_words and word.lower() in stop_words):\n indexer.add_and_get_index((previous_word.lower(), word.lower()))\n previous_word = word\n\n self.indexer = indexer\n self.corpus_length = len(indexer)\n\n self.feats = []\n for i, sentimentExample in enumerate(train_exs):\n sentence = sentimentExample.words\n self.feats.append(self.calculate_sentence_probability(sentence))\n\n def calculate_sentence_probability(self, sentence):\n col = [self.indexer.index_of(word.lower()) for word in sentence if self.indexer.contains(word.lower())]\n unigram_count = len(col)\n\n previous_word = None\n for word in sentence:\n if previous_word is not None:\n if self.indexer.contains((previous_word.lower(), word.lower())):\n col.append(self.indexer.index_of((previous_word.lower(), word.lower())))\n previous_word = word\n bigram_count = len(col) - unigram_count\n row = np.zeros(len(col), dtype=np.int)\n data = np.ones(len(col))\n data[:unigram_count] = data[:unigram_count] * 1. / unigram_count\n data[unigram_count:unigram_count + bigram_count] = data[unigram_count:unigram_count + bigram_count] * 1. / bigram_count\n feat = csr_matrix((data, (row, col)), shape=(1, self.corpus_length))\n return feat\n\nclass SentimentClassifier(object):\n \"\"\"\n Sentiment classifier base type\n \"\"\"\n def predict(self, ex_words: List[str]) -> int:\n \"\"\"\n :param ex_words: words (List[str]) in the sentence to classify\n :return: Either 0 for negative class or 1 for positive class\n \"\"\"\n raise Exception(\"Don't call me, call my subclasses\")\n\n\nclass TrivialSentimentClassifier(SentimentClassifier):\n \"\"\"\n Sentiment classifier that always predicts the positive class.\n \"\"\"\n def predict(self, ex_words: List[str]) -> int:\n return 1\n\n\nclass PerceptronClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n def __init__(self):\n raise Exception(\"Must be implemented\")\n\n\nclass LogisticRegressionClassifier(SentimentClassifier):\n \"\"\"\n Implement this class -- you should at least have init() and implement the predict method from the SentimentClassifier\n superclass. 
Hint: you'll probably need this class to wrap both the weight vector and featurizer -- feel free to\n modify the constructor to pass these in.\n \"\"\"\n def __init__(self, feat_size, feat_extractor):\n self.w = np.zeros(feat_size)\n self.feat_extractor = feat_extractor\n\n def predict(self, sentence):\n feat = self.feat_extractor.calculate_sentence_probability(sentence)\n return int(feat.dot(np.expand_dims(self.w, axis=1))[0, 0] > 0)\n\ndef train_perceptron(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> PerceptronClassifier:\n \"\"\"\n Train a classifier with the perceptron.\n :param train_exs: training set, List of SentimentExample objects\n :param feat_extractor: feature extractor to use\n :return: trained PerceptronClassifier model\n \"\"\"\n raise Exception(\"Must be implemented\")\n\ndef train_logistic_regression(train_exs: List[SentimentExample], feat_extractor: FeatureExtractor) -> LogisticRegressionClassifier:\n \"\"\"\n Train a logistic regression model.\n :param train_exs: training set, List of SentimentExample objects\n :param feat_extractor: feature extractor to use\n :return: trained LogisticRegressionClassifier model\n \"\"\"\n lr = LogisticRegressionClassifier(feat_extractor.corpus_length, feat_extractor)\n alpha = 1e0\n # beta = 1e-4\n for epoch in range(8):\n loss = 0.\n acc = 0\n indices = np.arange(len(train_exs))\n np.random.shuffle(indices)\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z) \\\n # + beta * np.expand_dims(lr.w, axis=0).dot(np.expand_dims(lr.w, axis=1))[0, 0]\n predict = int(feat.dot(np.expand_dims(lr.w, axis=1))[0, 0] > 0)\n acc += (predict == y)\n grad = (z - y) * feat.toarray()[0] # + 2 * beta * lr.w\n lr.w = lr.w - alpha * grad\n print(\"epoch {:d}, loss: {:f}, accuracy: {:f}\".format(epoch, loss / len(train_exs), acc / len(train_exs)))\n\n for i in indices:\n feat = feat_extractor.feats[i]\n sentimentExample = train_exs[i]\n y = sentimentExample.label\n z = 1 / (1 + np.exp(-feat.dot(np.expand_dims(lr.w, axis=1))))[0, 0]\n loss += -y * np.log(z) - (1 - y) * np.log(1 - z)\n print(\"training loss: {:f}\".format(loss / len(train_exs)))\n\n return lr\n\ndef train_model(args, train_exs: List[SentimentExample]) -> SentimentClassifier:\n \"\"\"\n Main entry point for your modifications. Trains and returns one of several models depending on the args\n passed in from the main method. 
You may modify this function, but probably will not need to.\n :param args: args bundle from sentiment_classifier.py\n :param train_exs: training set, List of SentimentExample objects\n :return: trained SentimentClassifier model, of whichever type is specified\n \"\"\"\n # Initialize feature extractor\n nltk.download('stopwords')\n stop_words = set(stopwords.words('english'))\n\n if args.model == \"TRIVIAL\":\n feat_extractor = None\n elif args.feats == \"UNIGRAM\":\n feat_extractor = UnigramFeatureExtractor(Indexer(), train_exs, stop_words)\n elif args.feats == \"BIGRAM\":\n # Add additional preprocessing code here\n feat_extractor = BigramFeatureExtractor(Indexer(), train_exs, stop_words)\n elif args.feats == \"BETTER\":\n # Add additional preprocessing code here\n feat_extractor = BetterFeatureExtractor(Indexer(), train_exs, stop_words)\n else:\n raise Exception(\"Pass in UNIGRAM, BIGRAM, or BETTER to run the appropriate system\")\n\n # Train the model\n if args.model == \"TRIVIAL\":\n model = TrivialSentimentClassifier()\n elif args.model == \"PERCEPTRON\":\n model = train_perceptron(train_exs, feat_extractor)\n elif args.model == \"LR\":\n model = train_logistic_regression(train_exs, feat_extractor)\n else:\n raise Exception(\"Pass in TRIVIAL, PERCEPTRON, or LR to run the appropriate system\")\n return model",
"step-ids": [
22,
29,
30,
31,
34
]
}
|
[
22,
29,
30,
31,
34
] |
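
A minimal runnable sketch of the logistic-regression step used in step-5 of the record above: z = sigmoid(w . x) on a sparse feature row, then a log-loss gradient update (z - y) * x. The 5-feature vector, the example row, and the learning rate are illustrative values, not taken from the record.

import numpy as np
from scipy.sparse import csr_matrix

w = np.zeros(5)                                    # weight vector, one slot per feature
x = csr_matrix(([0.5, 0.5], ([0, 0], [1, 3])), shape=(1, 5))  # one bag-of-words row
y = 1                                              # gold label
alpha = 1.0                                        # learning rate

z = 1.0 / (1.0 + np.exp(-x.dot(np.expand_dims(w, axis=1))[0, 0]))  # sigmoid(w . x)
w = w - alpha * (z - y) * x.toarray()[0]           # gradient step on the log loss
print(w)                                           # features 1 and 3 move toward class 1
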
from fastapi import APIRouter, Depends, status, Response
from typing import List
import schemas, database
from sqlalchemy.orm import Session
import repository.blog as blog
from .oauth2 import get_current_user
router = APIRouter(
prefix="/blog",
tags=['Blog'])
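# Every handler below depends on get_current_user, so all /blog routes require an authenticated user.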
@router.get('/', status_code=status.HTTP_200_OK, response_model=List[schemas.ShowBlog])
def all_blog(db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
return blog.all_blog(db)
@router.post('/', status_code=status.HTTP_201_CREATED)
def create(request:schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
return blog.create(request, db)
@router.delete('/{id}', status_code=status.HTTP_200_OK)
def destroy(id, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
return blog.destroy(id, db)
@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)
def update(id, request: schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
return blog.update(id, request, db)
@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas.ShowBlog)
def show(id, response: Response, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
return blog.show(id, response, db)
|
normal
|
{
"blob_id": "7fd5e83d28e919e7b94cea290c6b4db3378938b6",
"index": 4600,
"step-1": "<mask token>\n\n\[email protected]('/', status_code=status.HTTP_200_OK, response_model=List[\n schemas.ShowBlog])\ndef all_blog(db: Session=Depends(database.get_db), current_user: schemas.\n User=Depends(get_current_user)):\n return blog.all_blog(db)\n\n\[email protected]('/', status_code=status.HTTP_201_CREATED)\ndef create(request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session=Depends(database.get_db), current_user: schemas\n .User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/', status_code=status.HTTP_200_OK, response_model=List[\n schemas.ShowBlog])\ndef all_blog(db: Session=Depends(database.get_db), current_user: schemas.\n User=Depends(get_current_user)):\n return blog.all_blog(db)\n\n\[email protected]('/', status_code=status.HTTP_201_CREATED)\ndef create(request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session=Depends(database.get_db), current_user: schemas\n .User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_200_OK, response_model=schemas\n .ShowBlog)\ndef show(id, response: Response, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.show(id, response, db)\n",
"step-3": "<mask token>\nrouter = APIRouter(prefix='/blog', tags=['Blog'])\n\n\[email protected]('/', status_code=status.HTTP_200_OK, response_model=List[\n schemas.ShowBlog])\ndef all_blog(db: Session=Depends(database.get_db), current_user: schemas.\n User=Depends(get_current_user)):\n return blog.all_blog(db)\n\n\[email protected]('/', status_code=status.HTTP_201_CREATED)\ndef create(request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session=Depends(database.get_db), current_user: schemas\n .User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_200_OK, response_model=schemas\n .ShowBlog)\ndef show(id, response: Response, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.show(id, response, db)\n",
"step-4": "from fastapi import APIRouter, Depends, status, Response\nfrom typing import List\nimport schemas, database\nfrom sqlalchemy.orm import Session\nimport repository.blog as blog\nfrom .oauth2 import get_current_user\nrouter = APIRouter(prefix='/blog', tags=['Blog'])\n\n\[email protected]('/', status_code=status.HTTP_200_OK, response_model=List[\n schemas.ShowBlog])\ndef all_blog(db: Session=Depends(database.get_db), current_user: schemas.\n User=Depends(get_current_user)):\n return blog.all_blog(db)\n\n\[email protected]('/', status_code=status.HTTP_201_CREATED)\ndef create(request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session=Depends(database.get_db), current_user: schemas\n .User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\n\[email protected]('/{id}', status_code=status.HTTP_200_OK, response_model=schemas\n .ShowBlog)\ndef show(id, response: Response, db: Session=Depends(database.get_db),\n current_user: schemas.User=Depends(get_current_user)):\n return blog.show(id, response, db)\n",
"step-5": "from fastapi import APIRouter, Depends, status, Response\nfrom typing import List\nimport schemas, database\nfrom sqlalchemy.orm import Session\nimport repository.blog as blog\nfrom .oauth2 import get_current_user\n\nrouter = APIRouter(\n prefix=\"/blog\",\n tags=['Blog'])\n\[email protected]('/', status_code=status.HTTP_200_OK, response_model=List[schemas.ShowBlog])\ndef all_blog(db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.all_blog(db)\n\[email protected]('/', status_code=status.HTTP_201_CREATED)\ndef create(request:schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.create(request, db)\n\[email protected]('/{id}', status_code=status.HTTP_200_OK)\ndef destroy(id, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.destroy(id, db)\n\[email protected]('/{id}', status_code=status.HTTP_202_ACCEPTED)\ndef update(id, request: schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.update(id, request, db)\n\[email protected]('/{id}', status_code=status.HTTP_200_OK, response_model=schemas.ShowBlog)\ndef show(id, response: Response, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):\n return blog.show(id, response, db)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
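
A minimal sketch of mounting the router from the record above into an application. The module layout (routers.blog) and the database.Base / database.engine names are assumptions for illustration, not shown in the record.

from fastapi import FastAPI
import database                       # assumed to expose Base and engine
from routers import blog              # assumed location of the router module above

app = FastAPI()
database.Base.metadata.create_all(bind=database.engine)  # create tables once at startup
app.include_router(blog.router)       # every /blog route now runs get_current_user first

Run with, for example, "uvicorn main:app --reload"; requests to /blog endpoints must carry valid credentials, since each handler depends on get_current_user.
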
import time
import datetime
import mx
from openerp.report import report_sxw
class course_form(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(course_form, self).__init__(cr, uid, name, context)
self.localcontext.update({
'time': time,
'time1': self._get_time,
'course':self._get_course,
'line':self._get_data,
'user':self._get_user,
})
self.year = int(time.strftime('%Y'))
def _get_user(self,data, header=False):
if header:
return self.pool.get('res.company').browse(self.cr, self.uid, data['form']['company_id'][0]).logo
else:
return self.pool.get('res.users').browse(self.cr, self.uid, self.uid).name
def _get_course(self,data):
training_category_obj = self.pool.get('hr.training.category')
training_category_id = data['training_category_id']
training_category_id = not training_category_id and training_category_obj.browse(self.cr,self.uid,[]) or training_category_id
self.cr.execute(" select distinct c.id as course_id , c.name as course_name "\
"from hr_training_course as c "\
"where c.training_category_id in %s",(tuple(training_category_id),))
res = self.cr.dictfetchall()
return res
def _get_data(self, data,course_id):
date1 = data['date_from']
date2 = data['date_to']
side = data['type'] == '3' and 'inside' or 'outside'
self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1).year or self.year
res=[]
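        # Four near-identical queries follow; they differ only in which of the
        # optional start/end date filters is appended to the WHERE clause.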
if date1 and date2:
self.cr.execute(" select distinct emp.marital as marital, "\
"t.end_date as end,"\
"t.start_date as start,"\
"c.name as country,"\
"t.course_type as type,"\
"t.location as location,"\
"res.name as name " \
"from hr_employee_training t "\
"left join hr_employee_training_line line on (line.training_employee_id=t.id) "\
"left join hr_employee emp on (emp.id=line.employee_id) "\
"left join hr_job jop on (jop.id=emp.job_id) "\
"left join resource_resource res on (res.id=emp.resource_id) "\
"left join hr_training_course cou on(cou.id=t.course_id) "\
"left join res_country c on(t.country_id=c.id) "\
"where t.course_id = %s and "\
"t.type ='hr.approved.course' and t.training_place = %s and "\
"t.start_date >= %s and t.end_date <= %s ",(tuple([course_id]),side,date1,date2))
elif date1 and not date2:
self.cr.execute(" select distinct emp.marital as marital, "\
"t.end_date as end,"\
"t.start_date as start,"\
"c.name as country,"\
"t.course_type as type,"\
"t.location as location,"\
"res.name as name " \
"from hr_employee_training t "\
"left join hr_employee_training_line line on (line.training_employee_id=t.id) "\
"left join hr_employee emp on (emp.id=line.employee_id) "\
"left join hr_job jop on (jop.id=emp.job_id) "\
"left join resource_resource res on (res.id=emp.resource_id) "\
"left join hr_training_course cou on(cou.id=t.course_id) "\
"left join res_country c on(t.country_id=c.id) "\
"where t.course_id = %s and "\
"t.type ='hr.approved.course' and t.training_place = %s and "\
"t.start_date >= %s",(tuple([course_id]),side,date1))
elif date2 and not date1:
self.cr.execute(" select distinct emp.marital as marital, "\
"t.end_date as end,"\
"t.start_date as start,"\
"c.name as country,"\
"t.course_type as type,"\
"t.location as location,"\
"res.name as name " \
"from hr_employee_training t "\
"left join hr_employee_training_line line on (line.training_employee_id=t.id) "\
"left join hr_employee emp on (emp.id=line.employee_id) "\
"left join hr_job jop on (jop.id=emp.job_id) "\
"left join resource_resource res on (res.id=emp.resource_id) "\
"left join hr_training_course cou on(cou.id=t.course_id) "\
"left join res_country c on(t.country_id=c.id) "\
"where t.course_id = %s and "\
"t.type ='hr.approved.course' and t.training_place = %s and "\
"t.end_date <= %s ",(tuple([course_id]),side,date2))
else:
self.cr.execute(" select distinct emp.marital as marital, "\
"t.end_date as end,"\
"t.start_date as start,"\
"c.name as country,"\
"t.course_type as type,"\
"t.location as location,"\
"res.name as name " \
"from hr_employee_training t "\
"left join hr_employee_training_line line on (line.training_employee_id=t.id) "\
"left join hr_employee emp on (emp.id=line.employee_id) "\
"left join hr_job jop on (jop.id=emp.job_id) "\
"left join resource_resource res on (res.id=emp.resource_id) "\
"left join hr_training_course cou on(cou.id=t.course_id) "\
"left join res_country c on(t.country_id=c.id) "\
"where t.course_id = %s and "\
"t.type ='hr.approved.course' and t.training_place = %s ",(tuple([course_id]),side))
res=self.cr.dictfetchall()
return res
def _get_time(self):
return self.year
report_sxw.report_sxw('report.course.outside', 'hr.employee.training', 'addons/hr_ntc_custom/report/training.rml' ,parser=course_form ,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
normal
|
{
"blob_id": "c4fcca61e560046c77046079fb305be8c883653b",
"index": 2077,
"step-1": "<mask token>\n\n\nclass course_form(report_sxw.rml_parse):\n <mask token>\n <mask token>\n\n def _get_course(self, data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = (not training_category_id and\n training_category_obj.browse(self.cr, self.uid, []) or\n training_category_id)\n self.cr.execute(\n ' select distinct c.id as course_id , c.name as course_name from hr_training_course as c where c.training_category_id in %s'\n , (tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data, course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1\n ).year or self.year\n res = []\n if date1 and date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date1, date2))\n elif date1 and not date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s\"\n , (tuple([course_id]), side, date1))\n elif date2 and not date1:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date2))\n else:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou 
on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s \"\n , (tuple([course_id]), side))\n res = self.cr.dictfetchall()\n return res\n\n def _get_time(self):\n return self.year\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass course_form(report_sxw.rml_parse):\n\n def __init__(self, cr, uid, name, context):\n super(course_form, self).__init__(cr, uid, name, context)\n self.localcontext.update({'time': time, 'time1': self._get_time,\n 'course': self._get_course, 'line': self._get_data, 'user':\n self._get_user})\n self.year = int(time.strftime('%Y'))\n\n def _get_user(self, data, header=False):\n if header:\n return self.pool.get('res.company').browse(self.cr, self.uid,\n data['form']['company_id'][0]).logo\n else:\n return self.pool.get('res.users').browse(self.cr, self.uid,\n self.uid).name\n\n def _get_course(self, data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = (not training_category_id and\n training_category_obj.browse(self.cr, self.uid, []) or\n training_category_id)\n self.cr.execute(\n ' select distinct c.id as course_id , c.name as course_name from hr_training_course as c where c.training_category_id in %s'\n , (tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data, course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1\n ).year or self.year\n res = []\n if date1 and date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date1, date2))\n elif date1 and not date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s\"\n , (tuple([course_id]), side, date1))\n elif date2 and not date1:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.end_date <= %s \"\n , 
(tuple([course_id]), side, date2))\n else:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s \"\n , (tuple([course_id]), side))\n res = self.cr.dictfetchall()\n return res\n\n def _get_time(self):\n return self.year\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass course_form(report_sxw.rml_parse):\n\n def __init__(self, cr, uid, name, context):\n super(course_form, self).__init__(cr, uid, name, context)\n self.localcontext.update({'time': time, 'time1': self._get_time,\n 'course': self._get_course, 'line': self._get_data, 'user':\n self._get_user})\n self.year = int(time.strftime('%Y'))\n\n def _get_user(self, data, header=False):\n if header:\n return self.pool.get('res.company').browse(self.cr, self.uid,\n data['form']['company_id'][0]).logo\n else:\n return self.pool.get('res.users').browse(self.cr, self.uid,\n self.uid).name\n\n def _get_course(self, data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = (not training_category_id and\n training_category_obj.browse(self.cr, self.uid, []) or\n training_category_id)\n self.cr.execute(\n ' select distinct c.id as course_id , c.name as course_name from hr_training_course as c where c.training_category_id in %s'\n , (tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data, course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1\n ).year or self.year\n res = []\n if date1 and date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date1, date2))\n elif date1 and not date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s\"\n , (tuple([course_id]), side, date1))\n elif date2 and not date1:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.end_date <= %s \"\n , 
(tuple([course_id]), side, date2))\n else:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s \"\n , (tuple([course_id]), side))\n res = self.cr.dictfetchall()\n return res\n\n def _get_time(self):\n return self.year\n\n\nreport_sxw.report_sxw('report.course.outside', 'hr.employee.training',\n 'addons/hr_ntc_custom/report/training.rml', parser=course_form, header=\n False)\n",
"step-4": "import time\nimport datetime\nimport mx\nfrom openerp.report import report_sxw\n\n\nclass course_form(report_sxw.rml_parse):\n\n def __init__(self, cr, uid, name, context):\n super(course_form, self).__init__(cr, uid, name, context)\n self.localcontext.update({'time': time, 'time1': self._get_time,\n 'course': self._get_course, 'line': self._get_data, 'user':\n self._get_user})\n self.year = int(time.strftime('%Y'))\n\n def _get_user(self, data, header=False):\n if header:\n return self.pool.get('res.company').browse(self.cr, self.uid,\n data['form']['company_id'][0]).logo\n else:\n return self.pool.get('res.users').browse(self.cr, self.uid,\n self.uid).name\n\n def _get_course(self, data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = (not training_category_id and\n training_category_obj.browse(self.cr, self.uid, []) or\n training_category_id)\n self.cr.execute(\n ' select distinct c.id as course_id , c.name as course_name from hr_training_course as c where c.training_category_id in %s'\n , (tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data, course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1\n ).year or self.year\n res = []\n if date1 and date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date1, date2))\n elif date1 and not date2:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s and t.start_date >= %s\"\n , (tuple([course_id]), side, date1))\n elif date2 and not date1:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and 
t.training_place = %s and t.end_date <= %s \"\n , (tuple([course_id]), side, date2))\n else:\n self.cr.execute(\n \" select distinct emp.marital as marital, t.end_date as end,t.start_date as start,c.name as country,t.course_type as type,t.location as location,res.name as name from hr_employee_training t left join hr_employee_training_line line on (line.training_employee_id=t.id) left join hr_employee emp on (emp.id=line.employee_id) left join hr_job jop on (jop.id=emp.job_id) left join resource_resource res on (res.id=emp.resource_id) left join hr_training_course cou on(cou.id=t.course_id) left join res_country c on(t.country_id=c.id) where t.course_id = %s and t.type ='hr.approved.course' and t.training_place = %s \"\n , (tuple([course_id]), side))\n res = self.cr.dictfetchall()\n return res\n\n def _get_time(self):\n return self.year\n\n\nreport_sxw.report_sxw('report.course.outside', 'hr.employee.training',\n 'addons/hr_ntc_custom/report/training.rml', parser=course_form, header=\n False)\n",
"step-5": "import time\nimport datetime\nimport mx\nfrom openerp.report import report_sxw\n\n\nclass course_form(report_sxw.rml_parse):\n def __init__(self, cr, uid, name, context):\n super(course_form, self).__init__(cr, uid, name, context)\n self.localcontext.update({\n 'time': time,\n 'time1': self._get_time,\n 'course':self._get_course,\n 'line':self._get_data,\n 'user':self._get_user,\n })\n self.year = int(time.strftime('%Y'))\n\n def _get_user(self,data, header=False):\n if header:\n return self.pool.get('res.company').browse(self.cr, self.uid, data['form']['company_id'][0]).logo\n else:\n return self.pool.get('res.users').browse(self.cr, self.uid, self.uid).name\n\n def _get_course(self,data):\n training_category_obj = self.pool.get('hr.training.category')\n training_category_id = data['training_category_id']\n training_category_id = not training_category_id and training_category_obj.browse(self.cr,self.uid,[]) or training_category_id\n self.cr.execute(\" select distinct c.id as course_id , c.name as course_name \"\\\n \"from hr_training_course as c \"\\\n \"where c.training_category_id in %s\",(tuple(training_category_id),))\n res = self.cr.dictfetchall()\n return res\n\n def _get_data(self, data,course_id):\n date1 = data['date_from']\n date2 = data['date_to']\n side = data['type'] == '3' and 'inside' or 'outside'\n self.year = date1 and mx.DateTime.Parser.DateTimeFromString(date1).year or self.year\n res=[]\n if date1 and date2:\n self.cr.execute(\" select distinct emp.marital as marital, \"\\\n \"t.end_date as end,\"\\\n \"t.start_date as start,\"\\\n \"c.name as country,\"\\\n \"t.course_type as type,\"\\\n \"t.location as location,\"\\\n \"res.name as name \" \\\n \"from hr_employee_training t \"\\\n \"left join hr_employee_training_line line on (line.training_employee_id=t.id) \"\\\n \"left join hr_employee emp on (emp.id=line.employee_id) \"\\\n \"left join hr_job jop on (jop.id=emp.job_id) \"\\\n \"left join resource_resource res on (res.id=emp.resource_id) \"\\\n \"left join hr_training_course cou on(cou.id=t.course_id) \"\\\n \"left join res_country c on(t.country_id=c.id) \"\\\n \"where t.course_id = %s and \"\\\n \"t.type ='hr.approved.course' and t.training_place = %s and \"\\\n \"t.start_date >= %s and t.end_date <= %s \",(tuple([course_id]),side,date1,date2))\n elif date1 and not date2:\n self.cr.execute(\" select distinct emp.marital as marital, \"\\\n \"t.end_date as end,\"\\\n \"t.start_date as start,\"\\\n \"c.name as country,\"\\\n \"t.course_type as type,\"\\\n \"t.location as location,\"\\\n \"res.name as name \" \\\n \"from hr_employee_training t \"\\\n \"left join hr_employee_training_line line on (line.training_employee_id=t.id) \"\\\n \"left join hr_employee emp on (emp.id=line.employee_id) \"\\\n \"left join hr_job jop on (jop.id=emp.job_id) \"\\\n \"left join resource_resource res on (res.id=emp.resource_id) \"\\\n \"left join hr_training_course cou on(cou.id=t.course_id) \"\\\n \"left join res_country c on(t.country_id=c.id) \"\\\n \"where t.course_id = %s and \"\\\n \"t.type ='hr.approved.course' and t.training_place = %s and \"\\\n \"t.start_date >= %s\",(tuple([course_id]),side,date1))\n elif date2 and not date1:\n self.cr.execute(\" select distinct emp.marital as marital, \"\\\n \"t.end_date as end,\"\\\n \"t.start_date as start,\"\\\n \"c.name as country,\"\\\n \"t.course_type as type,\"\\\n \"t.location as location,\"\\\n \"res.name as name \" \\\n \"from hr_employee_training t \"\\\n \"left join hr_employee_training_line line on 
(line.training_employee_id=t.id) \"\\\n \"left join hr_employee emp on (emp.id=line.employee_id) \"\\\n \"left join hr_job jop on (jop.id=emp.job_id) \"\\\n \"left join resource_resource res on (res.id=emp.resource_id) \"\\\n \"left join hr_training_course cou on(cou.id=t.course_id) \"\\\n \"left join res_country c on(t.country_id=c.id) \"\\\n \"where t.course_id = %s and \"\\\n \"t.type ='hr.approved.course' and t.training_place = %s and \"\\\n \"t.end_date <= %s \",(tuple([course_id]),side,date2))\n else:\n self.cr.execute(\" select distinct emp.marital as marital, \"\\\n \"t.end_date as end,\"\\\n \"t.start_date as start,\"\\\n \"c.name as country,\"\\\n \"t.course_type as type,\"\\\n \"t.location as location,\"\\\n \"res.name as name \" \\\n \"from hr_employee_training t \"\\\n \"left join hr_employee_training_line line on (line.training_employee_id=t.id) \"\\\n \"left join hr_employee emp on (emp.id=line.employee_id) \"\\\n \"left join hr_job jop on (jop.id=emp.job_id) \"\\\n \"left join resource_resource res on (res.id=emp.resource_id) \"\\\n \"left join hr_training_course cou on(cou.id=t.course_id) \"\\\n \"left join res_country c on(t.country_id=c.id) \"\\\n \"where t.course_id = %s and \"\\\n \"t.type ='hr.approved.course' and t.training_place = %s \",(tuple([course_id]),side))\n\n \n res=self.cr.dictfetchall()\n\n return res\n\n \n def _get_time(self):\n return self.year\n\nreport_sxw.report_sxw('report.course.outside', 'hr.employee.training', 'addons/hr_ntc_custom/report/training.rml' ,parser=course_form ,header=False)\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
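
The queries in the record above lean on psycopg2's tuple adaptation: a Python tuple bound to a single %s placeholder is rendered as an SQL list, which is why "where ... in %s" works and why a single id is still wrapped as tuple([course_id]). A hedged sketch, assuming a reachable PostgreSQL database; the DSN, table, and ids are illustrative.

import psycopg2

conn = psycopg2.connect("dbname=openerp user=openerp")  # assumption: local server
cur = conn.cursor()
course_ids = (7, 12, 19)
cur.execute(
    "select id, name from hr_training_course where id in %s",
    (course_ids,),                 # the tuple itself is the one bound parameter
)
for row in cur.fetchall():
    print(row)
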
# import core modules and community packages
import sys, math, random
import pygame
# import configuration settings
from src.config import *
from src.board.levels import LEVEL_1
# import game elements
from src.pucman import Pucman
from src.ghast import Ghast
from src.board.board import Board
class Session():
def __init__(self, MODE="PLAYING"):
# init all game props
pygame.init()
# initialize game elements
board = Board(
size=BOARD_SIZE,
color=COLOR['BACKGROUND'],
level=LEVEL_1
)
pucman = Pucman(
start=board.findUniquePos(BOARD_ELEMENT_MAP['PUCMAN_START']),
size=board.tileSize,
color=COLOR['PUCMAN'],
MODE=MODE
)
ghasts = {
"blinky": Ghast(
name="Blinky",
start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']),
size=board.tileSize,
color=COLOR['BLINKY']
),
"pinky": Ghast(
name="Pinky",
start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']),
size=board.tileSize,
color=COLOR['PINKY']
),
"inky": Ghast(
name="Inky",
start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']),
size=board.tileSize,
color=COLOR['INKY']
),
"clyde": Ghast(
name="Clyde",
start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']),
size=board.tileSize,
color=COLOR['CLYDE']
)
}
self.board = board
self.pucman = pucman
self.ghasts = ghasts
self.clock = pygame.time.Clock()
self.MODE = MODE
def start(self):
# draw background & begin session
self.board.draw()
session = True
# while playing
while session:
# manage game time, 5 ticks per second
self.clock.tick(TICK_RATE[self.MODE])
# pygame.time.delay(50)
# update player state
self.pucman.move(self.board)
# Ghast-AI behavior
for ghast in self.ghasts:
sprite = self.ghasts[ghast]
sprite.move(self.pucman.pos, self.board)
if(sprite.atPucman(self.pucman.pos)):
session = False
print("You died to " + sprite.name)
# begin drawing back to front
self.board.draw()
self.pucman.draw(self.board)
for ghast in self.ghasts:
                self.ghasts[ghast].draw(self.board)
# update board
pygame.display.update()
|
normal
|
{
"blob_id": "f51a21ed71ede4e7462d9c77cb932a5f05b09e71",
"index": 9174,
"step-1": "<mask token>\n\n\nclass Session:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Session:\n <mask token>\n\n def start(self):\n self.board.draw()\n session = True\n while session:\n self.clock.tick(TICK_RATE[self.MODE])\n self.pucman.move(self.board)\n for ghast in self.ghasts:\n sprite = self.ghasts[ghast]\n sprite.move(self.pucman.pos, self.board)\n if sprite.atPucman(self.pucman.pos):\n session = False\n print('You died to ' + sprite.name)\n self.board.draw()\n self.pucman.draw(self.board)\n for ghast in self.ghasts:\n self.ghasts[ghast].draw(self.board._)\n pygame.display.update()\n",
"step-3": "<mask token>\n\n\nclass Session:\n\n def __init__(self, MODE='PLAYING'):\n pygame.init()\n board = Board(size=BOARD_SIZE, color=COLOR['BACKGROUND'], level=LEVEL_1\n )\n pucman = Pucman(start=board.findUniquePos(BOARD_ELEMENT_MAP[\n 'PUCMAN_START']), size=board.tileSize, color=COLOR['PUCMAN'],\n MODE=MODE)\n ghasts = {'blinky': Ghast(name='Blinky', start=board.findUniquePos(\n BOARD_ELEMENT_MAP['GHAST_SPAWN']), size=board.tileSize, color=\n COLOR['BLINKY']), 'pinky': Ghast(name='Pinky', start=board.\n findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']), size=board.\n tileSize, color=COLOR['PINKY']), 'inky': Ghast(name='Inky',\n start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']),\n size=board.tileSize, color=COLOR['INKY']), 'clyde': Ghast(name=\n 'Clyde', start=board.findUniquePos(BOARD_ELEMENT_MAP[\n 'GHAST_SPAWN']), size=board.tileSize, color=COLOR['CLYDE'])}\n self.board = board\n self.pucman = pucman\n self.ghasts = ghasts\n self.clock = pygame.time.Clock()\n self.MODE = MODE\n\n def start(self):\n self.board.draw()\n session = True\n while session:\n self.clock.tick(TICK_RATE[self.MODE])\n self.pucman.move(self.board)\n for ghast in self.ghasts:\n sprite = self.ghasts[ghast]\n sprite.move(self.pucman.pos, self.board)\n if sprite.atPucman(self.pucman.pos):\n session = False\n print('You died to ' + sprite.name)\n self.board.draw()\n self.pucman.draw(self.board)\n for ghast in self.ghasts:\n self.ghasts[ghast].draw(self.board._)\n pygame.display.update()\n",
"step-4": "import sys, math, random\nimport pygame\nfrom src.config import *\nfrom src.board.levels import LEVEL_1\nfrom src.pucman import Pucman\nfrom src.ghast import Ghast\nfrom src.board.board import Board\n\n\nclass Session:\n\n def __init__(self, MODE='PLAYING'):\n pygame.init()\n board = Board(size=BOARD_SIZE, color=COLOR['BACKGROUND'], level=LEVEL_1\n )\n pucman = Pucman(start=board.findUniquePos(BOARD_ELEMENT_MAP[\n 'PUCMAN_START']), size=board.tileSize, color=COLOR['PUCMAN'],\n MODE=MODE)\n ghasts = {'blinky': Ghast(name='Blinky', start=board.findUniquePos(\n BOARD_ELEMENT_MAP['GHAST_SPAWN']), size=board.tileSize, color=\n COLOR['BLINKY']), 'pinky': Ghast(name='Pinky', start=board.\n findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']), size=board.\n tileSize, color=COLOR['PINKY']), 'inky': Ghast(name='Inky',\n start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']),\n size=board.tileSize, color=COLOR['INKY']), 'clyde': Ghast(name=\n 'Clyde', start=board.findUniquePos(BOARD_ELEMENT_MAP[\n 'GHAST_SPAWN']), size=board.tileSize, color=COLOR['CLYDE'])}\n self.board = board\n self.pucman = pucman\n self.ghasts = ghasts\n self.clock = pygame.time.Clock()\n self.MODE = MODE\n\n def start(self):\n self.board.draw()\n session = True\n while session:\n self.clock.tick(TICK_RATE[self.MODE])\n self.pucman.move(self.board)\n for ghast in self.ghasts:\n sprite = self.ghasts[ghast]\n sprite.move(self.pucman.pos, self.board)\n if sprite.atPucman(self.pucman.pos):\n session = False\n print('You died to ' + sprite.name)\n self.board.draw()\n self.pucman.draw(self.board)\n for ghast in self.ghasts:\n self.ghasts[ghast].draw(self.board._)\n pygame.display.update()\n",
"step-5": "# import core modules and community packages\nimport sys, math, random\nimport pygame\n\n# import configuration settings\nfrom src.config import *\nfrom src.board.levels import LEVEL_1\n\n# import game elements\nfrom src.pucman import Pucman\nfrom src.ghast import Ghast\nfrom src.board.board import Board\n\nclass Session():\n def __init__(self, MODE=\"PLAYING\"):\n # init all game props\n pygame.init()\n\n # initialize game elements\n board = Board(\n size=BOARD_SIZE, \n color=COLOR['BACKGROUND'], \n level=LEVEL_1\n )\n pucman = Pucman(\n start=board.findUniquePos(BOARD_ELEMENT_MAP['PUCMAN_START']), \n size=board.tileSize, \n color=COLOR['PUCMAN'], \n MODE=MODE\n )\n ghasts = {\n \"blinky\": Ghast(\n name=\"Blinky\",\n start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']), \n size=board.tileSize, \n color=COLOR['BLINKY']\n ),\n \"pinky\": Ghast(\n name=\"Pinky\",\n start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']), \n size=board.tileSize, \n color=COLOR['PINKY']\n ),\n \"inky\": Ghast(\n name=\"Inky\",\n start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']), \n size=board.tileSize, \n color=COLOR['INKY']\n ),\n \"clyde\": Ghast(\n name=\"Clyde\",\n start=board.findUniquePos(BOARD_ELEMENT_MAP['GHAST_SPAWN']), \n size=board.tileSize, \n color=COLOR['CLYDE'] \n )\n }\n\n self.board = board\n self.pucman = pucman\n self.ghasts = ghasts\n self.clock = pygame.time.Clock()\n self.MODE = MODE\n\n def start(self):\n # draw background & begin session\n self.board.draw()\n session = True\n\n # while playing\n while session:\n # manage game time, 5 ticks per second\n self.clock.tick(TICK_RATE[self.MODE])\n # pygame.time.delay(50)\n\n # update player state\n self.pucman.move(self.board)\n\n # Ghast-AI behavior\n for ghast in self.ghasts:\n sprite = self.ghasts[ghast]\n\n sprite.move(self.pucman.pos, self.board)\n if(sprite.atPucman(self.pucman.pos)):\n session = False\n print(\"You died to \" + sprite.name)\n\n # begin drawing back to front\n self.board.draw()\n self.pucman.draw(self.board)\n for ghast in self.ghasts:\n self.ghasts[ghast].draw(self.board._)\n \n # update board\n pygame.display.update()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
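
The timing skeleton of Session.start from the record above, reduced to a self-contained loop. The window size and the 5-ticks-per-second cap are illustrative stand-ins for BOARD_SIZE and TICK_RATE.

import pygame

pygame.init()
screen = pygame.display.set_mode((320, 240))
clock = pygame.time.Clock()
running = True
while running:
    clock.tick(5)                      # cap the loop at 5 updates per second
    for event in pygame.event.get():   # drain the event queue so the window stays responsive
        if event.type == pygame.QUIT:
            running = False
    screen.fill((0, 0, 0))             # draw back to front: background first
    pygame.display.update()
pygame.quit()
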
def label_modes(trip_list, silent=True):
"""Labels trip segments by likely mode of travel.
Labels are "chilling" if traveler is stationary, "walking" if slow,
"driving" if fast, and "bogus" if too fast to be real.
trip_list [list]: a list of dicts in JSON format.
silent [bool]: if True, does not print reports.
Returns list of dicts in JSON format."""
if silent == False:
print('Preparing to label modes of travel for ' \
+ str(len(trip_list)) + ' trips.')
loop_counter = 0
loop_size = len(trip_list)
for doc in trip_list:
if silent == False:
loop_counter = loop_counter + 1
if loop_counter % 10000 == 0:
print('Labeling modes. Finished ' + str(loop_counter) \
+ ' trips.')
time_spent_driving = 0
time_spent_walking = 0
time_spent_chilling = 0
time_spent_bogus = 0
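        # Classify each segment by speed: >= 2.3 driving, (0, 2.3) walking,
        # exactly 0 chilling; anything above 22.22 is overridden to bogus.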
for i in range(1,len(doc['reduction'])):
if (float(doc['reduction'][i]['velocity']) >= 2.3):
doc['reduction'][i]['mode'] = 'driving'
elif (float(doc['reduction'][i]['velocity']) < 2.3 and float(doc['reduction'][i]['velocity']) > 0):
doc['reduction'][i]['mode'] = 'walking'
elif (float(doc['reduction'][i]['velocity']) == 0.0):
doc['reduction'][i]['mode'] = 'chilling'
if (float(doc['reduction'][i]['velocity']) > 22.22):
doc['reduction'][i]['mode'] = 'bogus'
for i in range(1,len(doc['reduction']) - 1):
path_length = 0
if (doc['reduction'][i]['mode'] == 'driving'):
for j in range(i+1,len(doc['reduction'])):
last_intersection_id = doc['reduction'][j]['IntersectionID']
if (doc['reduction'][j]['mode'] == 'walking'): path_length = path_length + 1
elif (doc['reduction'][j]['mode'] == 'driving' or doc['reduction'][j]['mode'] == 'bogus'): break
if (path_length > 5 or last_intersection_id == doc['reduction'][i]['IntersectionID']):
for k in range(i+1,j):
if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'walking'
else :
for k in range(i+1,j):
if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'driving'
if (doc['reduction'][i]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
elif (doc['reduction'][i]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
elif (doc['reduction'][i]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
elif (doc['reduction'][i]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])
if (doc['reduction'][-1]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
elif (doc['reduction'][-1]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
elif (doc['reduction'][-1]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
elif (doc['reduction'][-1]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])
duration_of_trip = float(doc['duration_of_trip'])
doc['time_percentage_driving'] = str(time_spent_driving/duration_of_trip*100)
doc['time_percentage_walking'] = str(time_spent_walking/duration_of_trip*100)
doc['time_percentage_chilling'] = str(time_spent_chilling/duration_of_trip*100)
doc['time_percentage_bogus'] = str(time_spent_bogus/duration_of_trip*100)
if silent == False:
print('Done labeling mode of travel. Returning list of length ' \
+ str(len(trip_list)) + '.')
return trip_list
|
normal
|
{
"blob_id": "3f4e8402bbd096a33ed159ca0fed250c74c2f876",
"index": 4833,
"step-1": "<mask token>\n",
"step-2": "def label_modes(trip_list, silent=True):\n \"\"\"Labels trip segments by likely mode of travel.\n\n Labels are \"chilling\" if traveler is stationary, \"walking\" if slow,\n \"driving\" if fast, and \"bogus\" if too fast to be real.\n\n trip_list [list]: a list of dicts in JSON format.\n silent [bool]: if True, does not print reports.\n\n Returns list of dicts in JSON format.\"\"\"\n if silent == False:\n print('Preparing to label modes of travel for ' + str(len(trip_list\n )) + ' trips.')\n loop_counter = 0\n loop_size = len(trip_list)\n for doc in trip_list:\n if silent == False:\n loop_counter = loop_counter + 1\n if loop_counter % 10000 == 0:\n print('Labeling modes. Finished ' + str(loop_counter) +\n ' trips.')\n time_spent_driving = 0\n time_spent_walking = 0\n time_spent_chilling = 0\n time_spent_bogus = 0\n for i in range(1, len(doc['reduction'])):\n if float(doc['reduction'][i]['velocity']) >= 2.3:\n doc['reduction'][i]['mode'] = 'driving'\n elif float(doc['reduction'][i]['velocity']) < 2.3 and float(doc\n ['reduction'][i]['velocity']) > 0:\n doc['reduction'][i]['mode'] = 'walking'\n elif float(doc['reduction'][i]['velocity']) == 0.0:\n doc['reduction'][i]['mode'] = 'chilling'\n if float(doc['reduction'][i]['velocity']) > 22.22:\n doc['reduction'][i]['mode'] = 'bogus'\n for i in range(1, len(doc['reduction']) - 1):\n path_length = 0\n if doc['reduction'][i]['mode'] == 'driving':\n for j in range(i + 1, len(doc['reduction'])):\n last_intersection_id = doc['reduction'][j]['IntersectionID'\n ]\n if doc['reduction'][j]['mode'] == 'walking':\n path_length = path_length + 1\n elif doc['reduction'][j]['mode'] == 'driving' or doc[\n 'reduction'][j]['mode'] == 'bogus':\n break\n if path_length > 5 or last_intersection_id == doc['reduction'][\n i]['IntersectionID']:\n for k in range(i + 1, j):\n if doc['reduction'][k]['mode'] != 'chilling':\n doc['reduction'][k]['mode'] = 'walking'\n else:\n for k in range(i + 1, j):\n if doc['reduction'][k]['mode'] != 'chilling':\n doc['reduction'][k]['mode'] = 'driving'\n if doc['reduction'][i]['mode'] == 'driving':\n time_spent_driving = time_spent_driving + float(doc[\n 'reduction'][i]['time']) - float(doc['reduction'][i - 1\n ]['time'])\n elif doc['reduction'][i]['mode'] == 'walking':\n time_spent_walking = time_spent_walking + float(doc[\n 'reduction'][i]['time']) - float(doc['reduction'][i - 1\n ]['time'])\n elif doc['reduction'][i]['mode'] == 'chilling':\n time_spent_chilling = time_spent_chilling + float(doc[\n 'reduction'][i]['time']) - float(doc['reduction'][i - 1\n ]['time'])\n elif doc['reduction'][i]['mode'] == 'bogus':\n time_spent_bogus = time_spent_bogus + float(doc['reduction'\n ][i]['time']) - float(doc['reduction'][i - 1]['time'])\n if doc['reduction'][-1]['mode'] == 'driving':\n time_spent_driving = time_spent_driving + float(doc['reduction'\n ][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif doc['reduction'][-1]['mode'] == 'walking':\n time_spent_walking = time_spent_walking + float(doc['reduction'\n ][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif doc['reduction'][-1]['mode'] == 'chilling':\n time_spent_chilling = time_spent_chilling + float(doc[\n 'reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif doc['reduction'][-1]['mode'] == 'bogus':\n time_spent_bogus = time_spent_bogus + float(doc['reduction'][-1\n ]['time']) - float(doc['reduction'][-2]['time'])\n duration_of_trip = float(doc['duration_of_trip'])\n doc['time_percentage_driving'] = str(time_spent_driving /\n duration_of_trip * 
100)\n doc['time_percentage_walking'] = str(time_spent_walking /\n duration_of_trip * 100)\n doc['time_percentage_chilling'] = str(time_spent_chilling /\n duration_of_trip * 100)\n doc['time_percentage_bogus'] = str(time_spent_bogus /\n duration_of_trip * 100)\n if silent == False:\n print('Done labeling mode of travel. Returning list of length ' +\n str(len(trip_list)) + '.')\n return trip_list\n",
"step-3": "def label_modes(trip_list, silent=True):\n \"\"\"Labels trip segments by likely mode of travel.\n\n Labels are \"chilling\" if traveler is stationary, \"walking\" if slow,\n \"driving\" if fast, and \"bogus\" if too fast to be real.\n\n trip_list [list]: a list of dicts in JSON format.\n silent [bool]: if True, does not print reports.\n\n Returns list of dicts in JSON format.\"\"\"\n\n\n if silent == False:\n print('Preparing to label modes of travel for ' \\\n + str(len(trip_list)) + ' trips.')\n\n loop_counter = 0\n loop_size = len(trip_list)\n for doc in trip_list:\n\n if silent == False:\n loop_counter = loop_counter + 1\n if loop_counter % 10000 == 0:\n print('Labeling modes. Finished ' + str(loop_counter) \\\n + ' trips.')\n\n time_spent_driving = 0\n time_spent_walking = 0\n time_spent_chilling = 0\n time_spent_bogus = 0\n for i in range(1,len(doc['reduction'])):\n if (float(doc['reduction'][i]['velocity']) >= 2.3):\n doc['reduction'][i]['mode'] = 'driving'\n\n elif (float(doc['reduction'][i]['velocity']) < 2.3 and float(doc['reduction'][i]['velocity']) > 0):\n doc['reduction'][i]['mode'] = 'walking'\n\n elif (float(doc['reduction'][i]['velocity']) == 0.0):\n doc['reduction'][i]['mode'] = 'chilling'\n\n if (float(doc['reduction'][i]['velocity']) > 22.22):\n doc['reduction'][i]['mode'] = 'bogus'\n\n\n for i in range(1,len(doc['reduction']) - 1):\n path_length = 0\n\n if (doc['reduction'][i]['mode'] == 'driving'):\n for j in range(i+1,len(doc['reduction'])):\n last_intersection_id = doc['reduction'][j]['IntersectionID']\n if (doc['reduction'][j]['mode'] == 'walking'): path_length = path_length + 1\n elif (doc['reduction'][j]['mode'] == 'driving' or doc['reduction'][j]['mode'] == 'bogus'): break\n\n if (path_length > 5 or last_intersection_id == doc['reduction'][i]['IntersectionID']):\n for k in range(i+1,j):\n if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'walking'\n else :\n for k in range(i+1,j):\n if (doc['reduction'][k]['mode'] != 'chilling'): doc['reduction'][k]['mode'] = 'driving'\n\n if (doc['reduction'][i]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n elif (doc['reduction'][i]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][i]['time']) - float(doc['reduction'][i-1]['time'])\n\n if (doc['reduction'][-1]['mode'] == 'driving'): time_spent_driving = time_spent_driving + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'walking'): time_spent_walking = time_spent_walking + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'chilling'): time_spent_chilling = time_spent_chilling + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n elif (doc['reduction'][-1]['mode'] == 'bogus'): time_spent_bogus = time_spent_bogus + float(doc['reduction'][-1]['time']) - float(doc['reduction'][-2]['time'])\n\n\n duration_of_trip = float(doc['duration_of_trip'])\n doc['time_percentage_driving'] = str(time_spent_driving/duration_of_trip*100)\n 
doc['time_percentage_walking'] = str(time_spent_walking/duration_of_trip*100)\n doc['time_percentage_chilling'] = str(time_spent_chilling/duration_of_trip*100)\n doc['time_percentage_bogus'] = str(time_spent_bogus/duration_of_trip*100)\n\n if silent == False:\n print('Done labeling mode of travel. Returning list of length ' \\\n + str(len(trip_list)) + '.')\n\n return trip_list",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python3
import re
import subprocess
PREFIX = "Enclave/"
OBJ_FILES = [
# "Enclave.o",
"p_block.o",
# "symbols.o",
"runtime.o",
"primitives.o",
"unary_op.o",
"unary/isna.o",
"unary/mathgen.o",
"unary/mathtrig.o",
"unary/plusminus.o",
"unary/summary.o",
"unary/print.o", # data dependent by design
"unary/ustats.o", # only the opcode for the dispatch, not the actual.
"binary_op.o",
"binary/arith.o",
"binary/bstats.o", # only the opcode for the dispatch, not the actual.
"binary/log_bin.o",
"binary/logic.o",
"binary/matmul.o",
"binary/compare.o",
"binary/pminmax.o",
"binary/bstats.o",
]
CONDITIONALS = [
]
# LIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete']) - set(['jne', 'je'])
LIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete'])
SKIP = ['nop',]
opcodes = set()
cond_results = {}
# subprocess.run(["make", "-f", "split.makefile", "clean"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
# subprocess.run(["make", "-f", "split.makefile", "all"], check=True)
for obj_file in OBJ_FILES:
cond_results[obj_file] = set()
dump = subprocess.run(["objdump", "-M", "intel", "-dr", PREFIX + obj_file], stdout=subprocess.PIPE, check=True).stdout
for line in dump.decode("utf-8").split("\n"):
cols = line.split('\t')
if len(cols) > 2:
new_code = re.sub(' .*', '', cols[2])
if new_code == '':
continue
# if new_code in CONDITIONALS:
if new_code not in LIBFTFP and new_code not in SKIP:
cond_results[obj_file].add(new_code)
opcodes.add(new_code)
print(sorted(opcodes - LIBFTFP))
for k,v in cond_results.items():
print(k,sorted(v))
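# Emit the combined whitelist as a LaTeX table, five opcodes per row; entries added on top of LIBFTFP are starred.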
combo = LIBFTFP.copy()
# for s in ['ja', 'jae', 'jb', 'je', 'jne', 'jge', 'jle', 'repz', 'cmovne', 'movq', 'jns']:
# combo.add(s)
combo.add("cmovne")
combo = sorted(combo)
for i in range(0, len(combo)):
print(r'\texttt{' + combo[i] + '}', end='')
if combo[i] not in LIBFTFP:
print('*', end='')
if i % 5 == 4:
print(r' \\')
else:
print(' & ', end='')
|
normal
|
{
"blob_id": "45d69194e14e8c20161e979d4ff34d0b90df4672",
"index": 4750,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor obj_file in OBJ_FILES:\n cond_results[obj_file] = set()\n dump = subprocess.run(['objdump', '-M', 'intel', '-dr', PREFIX +\n obj_file], stdout=subprocess.PIPE, check=True).stdout\n for line in dump.decode('utf-8').split('\\n'):\n cols = line.split('\\t')\n if len(cols) > 2:\n new_code = re.sub(' .*', '', cols[2])\n if new_code == '':\n continue\n if new_code not in LIBFTFP and new_code not in SKIP:\n cond_results[obj_file].add(new_code)\n opcodes.add(new_code)\nprint(sorted(opcodes - LIBFTFP))\nfor k, v in cond_results.items():\n print(k, sorted(v))\n<mask token>\ncombo.add('cmovne')\n<mask token>\nfor i in range(0, len(combo)):\n print('\\\\texttt{' + combo[i] + '}', end='')\n if combo[i] not in LIBFTFP:\n print('*', end='')\n if i % 5 == 4:\n print(' \\\\\\\\')\n else:\n print(' & ', end='')\n",
"step-3": "<mask token>\nPREFIX = 'Enclave/'\nOBJ_FILES = ['p_block.o', 'runtime.o', 'primitives.o', 'unary_op.o',\n 'unary/isna.o', 'unary/mathgen.o', 'unary/mathtrig.o',\n 'unary/plusminus.o', 'unary/summary.o', 'unary/print.o',\n 'unary/ustats.o', 'binary_op.o', 'binary/arith.o', 'binary/bstats.o',\n 'binary/log_bin.o', 'binary/logic.o', 'binary/matmul.o',\n 'binary/compare.o', 'binary/pminmax.o', 'binary/bstats.o']\nCONDITIONALS = []\nLIBFTFP = set(['add', 'mov', 'pop', 'setg', 'and', 'movabs', 'push', 'setl',\n 'call', 'movsd', 'rep', 'setle', 'cdqe', 'movsx', 'ret', 'setne', 'cmp',\n 'movsxd', 'sar', 'shl', 'imul', 'movzx', 'sbb', 'shr', 'je', 'mul',\n 'seta', 'sub', 'jmp', 'neg', 'setae', 'test', 'jne', 'not', 'setbe',\n 'xor', 'lea', 'or', 'sete'])\nSKIP = ['nop']\nopcodes = set()\ncond_results = {}\nfor obj_file in OBJ_FILES:\n cond_results[obj_file] = set()\n dump = subprocess.run(['objdump', '-M', 'intel', '-dr', PREFIX +\n obj_file], stdout=subprocess.PIPE, check=True).stdout\n for line in dump.decode('utf-8').split('\\n'):\n cols = line.split('\\t')\n if len(cols) > 2:\n new_code = re.sub(' .*', '', cols[2])\n if new_code == '':\n continue\n if new_code not in LIBFTFP and new_code not in SKIP:\n cond_results[obj_file].add(new_code)\n opcodes.add(new_code)\nprint(sorted(opcodes - LIBFTFP))\nfor k, v in cond_results.items():\n print(k, sorted(v))\ncombo = LIBFTFP.copy()\ncombo.add('cmovne')\ncombo = sorted(combo)\nfor i in range(0, len(combo)):\n print('\\\\texttt{' + combo[i] + '}', end='')\n if combo[i] not in LIBFTFP:\n print('*', end='')\n if i % 5 == 4:\n print(' \\\\\\\\')\n else:\n print(' & ', end='')\n",
"step-4": "import re\nimport subprocess\nPREFIX = 'Enclave/'\nOBJ_FILES = ['p_block.o', 'runtime.o', 'primitives.o', 'unary_op.o',\n 'unary/isna.o', 'unary/mathgen.o', 'unary/mathtrig.o',\n 'unary/plusminus.o', 'unary/summary.o', 'unary/print.o',\n 'unary/ustats.o', 'binary_op.o', 'binary/arith.o', 'binary/bstats.o',\n 'binary/log_bin.o', 'binary/logic.o', 'binary/matmul.o',\n 'binary/compare.o', 'binary/pminmax.o', 'binary/bstats.o']\nCONDITIONALS = []\nLIBFTFP = set(['add', 'mov', 'pop', 'setg', 'and', 'movabs', 'push', 'setl',\n 'call', 'movsd', 'rep', 'setle', 'cdqe', 'movsx', 'ret', 'setne', 'cmp',\n 'movsxd', 'sar', 'shl', 'imul', 'movzx', 'sbb', 'shr', 'je', 'mul',\n 'seta', 'sub', 'jmp', 'neg', 'setae', 'test', 'jne', 'not', 'setbe',\n 'xor', 'lea', 'or', 'sete'])\nSKIP = ['nop']\nopcodes = set()\ncond_results = {}\nfor obj_file in OBJ_FILES:\n cond_results[obj_file] = set()\n dump = subprocess.run(['objdump', '-M', 'intel', '-dr', PREFIX +\n obj_file], stdout=subprocess.PIPE, check=True).stdout\n for line in dump.decode('utf-8').split('\\n'):\n cols = line.split('\\t')\n if len(cols) > 2:\n new_code = re.sub(' .*', '', cols[2])\n if new_code == '':\n continue\n if new_code not in LIBFTFP and new_code not in SKIP:\n cond_results[obj_file].add(new_code)\n opcodes.add(new_code)\nprint(sorted(opcodes - LIBFTFP))\nfor k, v in cond_results.items():\n print(k, sorted(v))\ncombo = LIBFTFP.copy()\ncombo.add('cmovne')\ncombo = sorted(combo)\nfor i in range(0, len(combo)):\n print('\\\\texttt{' + combo[i] + '}', end='')\n if combo[i] not in LIBFTFP:\n print('*', end='')\n if i % 5 == 4:\n print(' \\\\\\\\')\n else:\n print(' & ', end='')\n",
"step-5": "#!/usr/bin/env python3\nimport re\nimport subprocess\n\nPREFIX = \"Enclave/\"\nOBJ_FILES = [\n # \"Enclave.o\",\n \"p_block.o\",\n # \"symbols.o\",\n \"runtime.o\",\n \"primitives.o\",\n \"unary_op.o\",\n \"unary/isna.o\",\n \"unary/mathgen.o\",\n \"unary/mathtrig.o\",\n \"unary/plusminus.o\",\n \"unary/summary.o\",\n \"unary/print.o\", # data dependent by design\n \"unary/ustats.o\", # only the opcode for the dispatch, not the actual.\n \"binary_op.o\",\n \"binary/arith.o\",\n \"binary/bstats.o\", # only the opcode for the dispatch, not the actual.\n \"binary/log_bin.o\",\n \"binary/logic.o\",\n \"binary/matmul.o\",\n \"binary/compare.o\",\n \"binary/pminmax.o\",\n \"binary/bstats.o\",\n]\n\nCONDITIONALS = [\n]\n\n# LIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete']) - set(['jne', 'je'])\nLIBFTFP = set(['add','mov','pop','setg','and','movabs','push','setl', 'call','movsd','rep','setle','cdqe','movsx','ret','setne','cmp','movsxd','sar','shl', 'imul','movzx','sbb','shr','je','mul','seta','sub', 'jmp','neg','setae','test', 'jne','not','setbe','xor', 'lea','or','sete'])\n\nSKIP = ['nop',]\n\nopcodes = set()\ncond_results = {}\n# subprocess.run([\"make\", \"-f\", \"split.makefile\", \"clean\"], check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n# subprocess.run([\"make\", \"-f\", \"split.makefile\", \"all\"], check=True)\nfor obj_file in OBJ_FILES:\n cond_results[obj_file] = set()\n dump = subprocess.run([\"objdump\", \"-M\", \"intel\", \"-dr\", PREFIX + obj_file], stdout=subprocess.PIPE, check=True).stdout\n for line in dump.decode(\"utf-8\").split(\"\\n\"):\n cols = line.split('\\t')\n if len(cols) > 2:\n new_code = re.sub(' .*', '', cols[2])\n if new_code == '':\n continue\n # if new_code in CONDITIONALS:\n if new_code not in LIBFTFP and new_code not in SKIP:\n cond_results[obj_file].add(new_code)\n opcodes.add(new_code)\n\n\n# print(sorted(opcodes))\nprint(sorted(opcodes - LIBFTFP))\nfor k,v in cond_results.items():\n print(k,sorted(v))\n\ncombo = LIBFTFP.copy()\n# for s in ['ja', 'jae', 'jb', 'je', 'jne', 'jge', 'jle', 'repz', 'cmovne', 'movq', 'jns']:\n# combo.add(s)\ncombo.add(\"cmovne\")\ncombo = sorted(combo)\nfor i in range(0, len(combo)):\n print(r'\\texttt{' + combo[i] + '}', end='')\n if combo[i] not in LIBFTFP:\n print('*', end='')\n if i % 5 == 4:\n print(r' \\\\')\n else:\n print(' & ', end='')\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from armulator.armv6.bits_ops import add_with_carry, bit_not
from armulator.armv6.enums import InstrSet
from armulator.armv6.opcodes.opcode import Opcode
class SubsPcLrThumb(Opcode):
def __init__(self, instruction, imm32, n):
super().__init__(instruction)
self.imm32 = imm32
self.n = n
def execute(self, processor):
if processor.condition_passed():
if (processor.registers.current_mode_is_user_or_system() or
processor.registers.current_instr_set() == InstrSet.THUMB_EE):
print('unpredictable')
else:
operand2 = self.imm32
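                # R[n] - operand2, computed as R[n] + NOT(operand2) + 1 (two's-complement subtraction)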
result = add_with_carry(processor.registers.get(self.n), bit_not(operand2, 32), 1)[0]
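                # 0b11010 is the Hyp mode encoding; returning there with both J and T set is unpredictable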
if (processor.registers.cpsr.m == 0b11010 and
processor.registers.cpsr.j and
processor.registers.cpsr.t):
print('unpredictable')
else:
processor.branch_write_pc(result)
|
normal
|
{
"blob_id": "89376b2464dfb724197a1c1e164af8277e03ad59",
"index": 2507,
"step-1": "<mask token>\n\n\nclass SubsPcLrThumb(Opcode):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SubsPcLrThumb(Opcode):\n\n def __init__(self, instruction, imm32, n):\n super().__init__(instruction)\n self.imm32 = imm32\n self.n = n\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass SubsPcLrThumb(Opcode):\n\n def __init__(self, instruction, imm32, n):\n super().__init__(instruction)\n self.imm32 = imm32\n self.n = n\n\n def execute(self, processor):\n if processor.condition_passed():\n if processor.registers.current_mode_is_user_or_system(\n ) or processor.registers.current_instr_set(\n ) == InstrSet.THUMB_EE:\n print('unpredictable')\n else:\n operand2 = self.imm32\n result = add_with_carry(processor.registers.get(self.n),\n bit_not(operand2, 32), 1)[0]\n if (processor.registers.cpsr.m == 26 and processor.\n registers.cpsr.j and processor.registers.cpsr.t):\n print('unpredictable')\n else:\n processor.branch_write_pc(result)\n",
"step-4": "from armulator.armv6.bits_ops import add_with_carry, bit_not\nfrom armulator.armv6.enums import InstrSet\nfrom armulator.armv6.opcodes.opcode import Opcode\n\n\nclass SubsPcLrThumb(Opcode):\n\n def __init__(self, instruction, imm32, n):\n super().__init__(instruction)\n self.imm32 = imm32\n self.n = n\n\n def execute(self, processor):\n if processor.condition_passed():\n if processor.registers.current_mode_is_user_or_system(\n ) or processor.registers.current_instr_set(\n ) == InstrSet.THUMB_EE:\n print('unpredictable')\n else:\n operand2 = self.imm32\n result = add_with_carry(processor.registers.get(self.n),\n bit_not(operand2, 32), 1)[0]\n if (processor.registers.cpsr.m == 26 and processor.\n registers.cpsr.j and processor.registers.cpsr.t):\n print('unpredictable')\n else:\n processor.branch_write_pc(result)\n",
"step-5": "from armulator.armv6.bits_ops import add_with_carry, bit_not\nfrom armulator.armv6.enums import InstrSet\nfrom armulator.armv6.opcodes.opcode import Opcode\n\n\nclass SubsPcLrThumb(Opcode):\n def __init__(self, instruction, imm32, n):\n super().__init__(instruction)\n self.imm32 = imm32\n self.n = n\n\n def execute(self, processor):\n if processor.condition_passed():\n if (processor.registers.current_mode_is_user_or_system() or\n processor.registers.current_instr_set() == InstrSet.THUMB_EE):\n print('unpredictable')\n else:\n operand2 = self.imm32\n result = add_with_carry(processor.registers.get(self.n), bit_not(operand2, 32), 1)[0]\n if (processor.registers.cpsr.m == 0b11010 and\n processor.registers.cpsr.j and\n processor.registers.cpsr.t):\n print('unpredictable')\n else:\n processor.branch_write_pc(result)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
from ex.models import Teacher,Student,Group,Report,TeamEvaluation,PrivateLetter,ChatBoxIsOpen
from django.core import serializers
from rest_framework.views import APIView
from rest_framework.response import Response
from django.contrib.auth.hashers import make_password, check_password
import os
from ex.utils.jwt_auth import create_token, get_user_id
from ex.utils.extensions.auth import JwtQueryParamAuthentication
from django.db.models import Q
# Create your views here.
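# Teacher-side private-letter (direct message) endpoints come first;
# the stu*-prefixed classes below mirror them for student accounts.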
class getPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
data_list = []
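            # Rebuild the full message thread for every chat box this teacher has open.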
for item in ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
if item.receiverStu_id != None:
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                        'Ienter': 1  # sent
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverTea_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                        'Ienter': 2  # received
}
msgList2.append(data)
else:
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                        'Ienter': 1  # sent
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverTea_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                        'Ienter': 2  # received
}
msgList2.append(data)
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
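                # Merge the two time-sorted lists (sent and received) into one chronological thread.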
for i in range(0,len1 + len2):
if i1 >= len1:
msgList.append(msgList2[i2])
i2+=1
elif i2 >= len2:
msgList.append(msgList1[i1])
i1+=1
elif msgList1[i1]['time'] < msgList2[i2]['time']:
msgList.append(msgList1[i1])
i1+=1
else:
msgList.append(msgList2[i2])
i2+=1
data = {
'id': item.id,
'receiver': receiver,
'msgList': msgList,
'name': receiver + str(identity),
'identity': identity
}
data_list.append(data)
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
class enterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
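            # identity 0: the recipient is a teacher (keyed by id); otherwise a student looked up by stu_num.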
if identity == 0:
privateLetter = PrivateLetter(senderTea_id=user_id,receiverTea_id=receiver,message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver)&Q(receiverTea_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,receiverTea_id=user_id)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
privateLetter = PrivateLetter(senderTea_id=user_id,receiverStu_id=receiverStu_id,message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id)&Q(receiverTea_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id,receiverTea_id=user_id)
privateLetter.save()
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '发布私信成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Get recent contacts
class getRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderTea_id=user_id):
if item.receiverTea_id != None and item.receiverTea_id != "":
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
                data = {
                    'receiver': receiver,
                    'identity': identity  # teacher: 0; student: 1
                }
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverTea_id=user_id):
if item.senderTea_id != None and item.senderTea_id != "":
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num
                data = {
                    'receiver': receiver,
                    'identity': identity  # teacher: 0; student: 1
                }
data_list.append(data)
lenData = len(data_list)
dict = {}
data_list1 = []
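            # Walk newest-to-oldest so only the most recent entry per contact survives.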
for i in range(lenData - 1,-1,-1):
if (data_list[i]['receiver'] + str(data_list[i]['identity'])) not in dict:
dict[data_list[i]['receiver'] + str(data_list[i]['identity'])] = '1'
data_list1.append(data_list[i])
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list1
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Close a chat box
class closeChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=Student.objects.filter(stu_num=receiver).first().id)).first()
chatBoxIsOpen.delete()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Open a chat box
class openChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Search for a contact
class searchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.GET['receiver']
identity = request.GET['identity']
            # iden codes: 0 = teacher, 1 = student, 3 = self, 4 = user not found
            iden = 4
if identity == '0' and user_id == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {
'identity': iden,
'receiver': receiver
}
return Response({
'status': 200,
'msg': '返回成功',
'data': data
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
class stuGetPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
username = payload['username']
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
if item.receiverStu_id != None:
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                        'Ienter': 1  # sent
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverStu_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                        'Ienter': 2  # received
}
msgList2.append(data)
else:
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                        'Ienter': 1  # sent
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverStu_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
                        'Ienter': 2  # received
}
msgList2.append(data)
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
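                # Merge the two time-sorted lists (sent and received) into one chronological thread.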
for i in range(0,len1 + len2):
if i1 >= len1:
msgList.append(msgList2[i2])
i2+=1
elif i2 >= len2:
msgList.append(msgList1[i1])
i1+=1
elif msgList1[i1]['time'] < msgList2[i2]['time']:
msgList.append(msgList1[i1])
i1+=1
else:
msgList.append(msgList2[i2])
i2+=1
data = {
'id': item.id,
'receiver': receiver,
'msgList': msgList,
'name': receiver + str(identity),
'identity': identity
}
data_list.append(data)
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
class stuEnterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
username = payload['username']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
if identity == 0:
privateLetter = PrivateLetter(senderStu_id=user_id,receiverTea_id=receiver,message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver)&Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,receiverStu_id=user_id)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
privateLetter = PrivateLetter(senderStu_id=user_id,receiverStu_id=receiverStu_id,message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id)&Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id,receiverStu_id=user_id)
privateLetter.save()
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '发布私信成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Get recent contacts
class stuRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderStu_id=user_id):
if item.receiverTea_id != None and item.receiverTea_id != "":
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
data = {
'receiver': receiver,
                    'identity': identity  # teacher: 0; student: 1
}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverStu_id=user_id):
if item.senderTea_id != None and item.senderTea_id != "":
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num
data = {
'receiver': receiver,
                    'identity': identity  # teacher: 0; student: 1
}
data_list.append(data)
lenData = len(data_list)
dict = {}
data_list1 = []
for i in range(lenData - 1,-1,-1):
if (data_list[i]['receiver'] + str(data_list[i]['identity'])) not in dict:
dict[data_list[i]['receiver'] + str(data_list[i]['identity'])] = '1'
data_list1.append(data_list[i])
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list1
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Close a chat box
class stuCloseChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=Student.objects.filter(stu_num=receiver).first().id)).first()
chatBoxIsOpen.delete()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Open a chat box
class stuOpenChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Search for a contact
class stuSearchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
username = payload['username']
receiver = request.GET['receiver']
identity = request.GET['identity']
            # iden codes: 0 = teacher, 1 = student, 2 = not yet searched, 3 = self, 4 = user not found
iden = 4
if identity == '1' and username == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {
'identity': iden,
'receiver': receiver
}
return Response({
'status': 200,
'msg': '返回成功',
'data': data
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
|
normal
|
{
"blob_id": "4b5794ff79371c2e49c5d2b621805b08c4ff7acb",
"index": 8898,
"step-1": "<mask token>\n\n\nclass searchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '0' and user_id == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuGetPrivateLetterListsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id\n ) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverStu_id=item.\n receiverStu_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=item.receiverStu_id) & Q(\n receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n else:\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverTea_id=receiver)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=receiver) & Q(receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0, len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2 += 1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1 += 1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1 += 1\n else:\n msgList.append(msgList2[i2])\n i2 += 1\n data = {'id': item.id, 'receiver': receiver, 'msgList':\n msgList, 'name': receiver + str(identity), 'identity':\n identity}\n data_list.append(data)\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuEnterPrivateLetterView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return 
Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n if identity == 0:\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverTea_id=receiver, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =receiver) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,\n receiverStu_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverStu_id=receiverStu_id, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =receiverStu_id) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=\n receiverStu_id, receiverStu_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '发布私信成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderStu_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverStu_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuCloseChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuOpenChatBoxView(APIView):\n\n def post(self, request, *args, 
**kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuSearchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '1' and username == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n",
"step-2": "<mask token>\n\n\nclass closeChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass openChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass searchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '0' and user_id == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuGetPrivateLetterListsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id\n ) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverStu_id=item.\n 
receiverStu_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=item.receiverStu_id) & Q(\n receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n else:\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverTea_id=receiver)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=receiver) & Q(receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0, len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2 += 1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1 += 1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1 += 1\n else:\n msgList.append(msgList2[i2])\n i2 += 1\n data = {'id': item.id, 'receiver': receiver, 'msgList':\n msgList, 'name': receiver + str(identity), 'identity':\n identity}\n data_list.append(data)\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuEnterPrivateLetterView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n if identity == 0:\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverTea_id=receiver, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =receiver) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,\n receiverStu_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverStu_id=receiverStu_id, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =receiverStu_id) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=\n receiverStu_id, receiverStu_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '发布私信成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderStu_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = 
item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverStu_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuCloseChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuOpenChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuSearchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '1' and username == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = 
Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n",
"step-3": "<mask token>\n\n\nclass enterPrivateLetterView(APIView):\n <mask token>\n\n\nclass getRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderTea_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverTea_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass closeChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass openChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass searchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n 
payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '0' and user_id == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuGetPrivateLetterListsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id\n ) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverStu_id=item.\n receiverStu_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=item.receiverStu_id) & Q(\n receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n else:\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverTea_id=receiver)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=receiver) & Q(receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0, len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2 += 1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1 += 1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1 += 1\n else:\n msgList.append(msgList2[i2])\n i2 += 1\n data = {'id': item.id, 'receiver': receiver, 'msgList':\n msgList, 'name': receiver + str(identity), 'identity':\n identity}\n data_list.append(data)\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuEnterPrivateLetterView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver 
= request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n if identity == 0:\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverTea_id=receiver, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =receiver) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,\n receiverStu_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverStu_id=receiverStu_id, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =receiverStu_id) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=\n receiverStu_id, receiverStu_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '发布私信成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderStu_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverStu_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuCloseChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuOpenChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return 
Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuSearchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '1' and username == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n",
"step-4": "from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom ex.models import Teacher, Student, Group, Report, TeamEvaluation, PrivateLetter, ChatBoxIsOpen\nfrom django.core import serializers\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.contrib.auth.hashers import make_password, check_password\nimport os\nfrom ex.utils.jwt_auth import create_token, get_user_id\nfrom ex.utils.extensions.auth import JwtQueryParamAuthentication\nfrom django.db.models import Q\n\n\nclass getPrivateLetterListsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id\n ) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=user_id) & Q(receiverStu_id=item.\n receiverStu_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=item.receiverStu_id) & Q(\n receiverTea_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n else:\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=user_id) & Q(receiverTea_id=receiver)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=receiver) & Q(receiverTea_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0, len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2 += 1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1 += 1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1 += 1\n else:\n msgList.append(msgList2[i2])\n i2 += 1\n data = {'id': item.id, 'receiver': receiver, 'msgList':\n msgList, 'name': receiver + str(identity), 'identity':\n identity}\n data_list.append(data)\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass enterPrivateLetterView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n if identity == 0:\n privateLetter = PrivateLetter(senderTea_id=user_id,\n receiverTea_id=receiver, message=message)\n chatBoxIsOpen = 
ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =receiver) & Q(receiverTea_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,\n receiverTea_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n privateLetter = PrivateLetter(senderTea_id=user_id,\n receiverStu_id=receiverStu_id, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =receiverStu_id) & Q(receiverTea_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=\n receiverStu_id, receiverTea_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '发布私信成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass getRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderTea_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverTea_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass closeChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass openChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n 
=user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass searchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '0' and user_id == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuGetPrivateLetterListsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id\n ) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverStu_id=item.\n receiverStu_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=item.receiverStu_id) & Q(\n receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n else:\n for item2 in PrivateLetter.objects.filter(Q(\n senderStu_id=user_id) & Q(receiverTea_id=receiver)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 1}\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(\n senderTea_id=receiver) & Q(receiverStu_id=user_id)):\n data = {'id': item2.id, 'message': item2.message,\n 'time': str(item2.time.strftime(\n '%Y-%m-%d %H:%M:%S')), 'new': item2.new,\n 'Ienter': 2}\n msgList2.append(data)\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0, len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2 += 1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1 += 1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n 
msgList.append(msgList1[i1])\n i1 += 1\n else:\n msgList.append(msgList2[i2])\n i2 += 1\n data = {'id': item.id, 'receiver': receiver, 'msgList':\n msgList, 'name': receiver + str(identity), 'identity':\n identity}\n data_list.append(data)\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuEnterPrivateLetterView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n if identity == 0:\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverTea_id=receiver, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id\n =receiver) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,\n receiverStu_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n privateLetter = PrivateLetter(senderStu_id=user_id,\n receiverStu_id=receiverStu_id, message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =receiverStu_id) & Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=\n receiverStu_id, receiverStu_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '发布私信成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuRecentContactsView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n data_list = []\n for item in PrivateLetter.objects.filter(senderStu_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != '':\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverStu_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != '':\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id\n ).first().stu_num\n data = {'receiver': receiver, 'identity': identity}\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1, -1, -1):\n if data_list[i]['receiver'] + str(data_list[i]['identity']\n ) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i][\n 'identity'])] = '1'\n data_list1.append(data_list[i])\n return Response({'status': 200, 'msg': '返回成功', 'data': data_list1})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuCloseChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n 
receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=Student.objects.filter(\n stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuOpenChatBoxView(APIView):\n\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver\n ).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id\n =user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,\n receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n return Response({'status': 200, 'msg': '返回成功'})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n\n\nclass stuSearchContactView(APIView):\n\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self,\n request)[0]\n except Exception as e:\n return Response({'status': 403, 'msg': '未登录', 'err': e.args})\n user_id = payload['id']\n username = payload['username']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n iden = 4\n if identity == '1' and username == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {'identity': iden, 'receiver': receiver}\n return Response({'status': 200, 'msg': '返回成功', 'data': data})\n except Exception as e:\n return Response({'status': 204, 'msg': '遇到了异常错误', 'err': e.args})\n",
"step-5": "from django.shortcuts import render\nfrom django.http import HttpResponse,JsonResponse\nfrom ex.models import Teacher,Student,Group,Report,TeamEvaluation,PrivateLetter,ChatBoxIsOpen\nfrom django.core import serializers\n\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.contrib.auth.hashers import make_password, check_password\n# from plane.models import User, Student, LightList, Light, Score, Visit\n# from plane.utils.jwt_auth import create_token, get_user_id\n# from django.contrib.auth.hashers import make_password, check_password\n\n# from rest_framework.authtoken.models import Token\n# from django.contrib.auth import authenticate\n\nimport os\n\nfrom ex.utils.jwt_auth import create_token, get_user_id\n\nfrom ex.utils.extensions.auth import JwtQueryParamAuthentication\n\nfrom django.db.models import Q\n\n# Create your views here.\n\nclass getPrivateLetterListsView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n \n user_id = payload['id']\n \n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 1 # 发送\n }\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverTea_id=user_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 2 # 接收\n }\n msgList2.append(data)\n # msgList.sort()\n # print(len(msgList1))\n else:\n for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 1 # 发送\n }\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverTea_id=user_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 2 # 接收\n }\n msgList2.append(data)\n # msgList.sort()\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0,len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2+=1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1+=1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1+=1\n else:\n msgList.append(msgList2[i2])\n i2+=1\n\n # print(msgList)\n data = {\n 'id': item.id,\n 'receiver': receiver,\n 'msgList': msgList,\n 'name': receiver + str(identity),\n 'identity': identity\n }\n data_list.append(data)\n # print(data_list)\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data_list\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\nclass enterPrivateLetterView(APIView):\n def post(self, request, *args, 
**kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n\n if identity == 0:\n privateLetter = PrivateLetter(senderTea_id=user_id,receiverTea_id=receiver,message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver)&Q(receiverTea_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,receiverTea_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver).first().id\n privateLetter = PrivateLetter(senderTea_id=user_id,receiverStu_id=receiverStu_id,message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id)&Q(receiverTea_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id,receiverTea_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n\n return Response({\n 'status': 200,\n 'msg': '发布私信成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 获取最近联系人\nclass getRecentContactsView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n \n data_list = []\n for item in PrivateLetter.objects.filter(senderTea_id=user_id):\n if item.receiverTea_id != None and item.receiverTea_id != \"\":\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num\n # print(((receiver + str(identity)) not in dict))\n # if (receiver + str(identity)) not in dict:\n # dict[receiver + str(identity)] = '1'\n data = {\n # 'id': item.id,\n 'receiver': receiver,\n 'identity': identity #老师:0;学生:1\n }\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverTea_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != \"\":\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num\n # print(((receiver + str(identity)) not in dict))\n # if (receiver + str(identity)) not in dict:\n # dict[receiver + str(identity)] = '1'\n data = {\n # 'id': item.id,\n 'receiver': receiver,\n 'identity': identity #老师:0;学生:1\n }\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1,-1,-1):\n if (data_list[i]['receiver'] + str(data_list[i]['identity'])) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i]['identity'])] = '1'\n data_list1.append(data_list[i])\n\n # lenData = len(data_list1)\n # if lenData > 10:\n # data_list1 = data_list1[lenData - 10:lenData]\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data_list1\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 关闭聊天框\nclass closeChatBoxView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = 
request.data.get('receiver')\n iden = request.data.get('iden')\n \n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=Student.objects.filter(stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 打开聊天框\nclass openChatBoxView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n \n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 搜索联系人\nclass searchContactView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n user_id = payload['id']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n # print(receiver,identity=='0')\n\n # user = Teacher.objects.filter(id=username).first()\n iden = 4\n if identity == '0' and user_id == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {\n 'identity': iden,\n 'receiver': receiver\n }\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\nclass stuGetPrivateLetterListsView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n user_id = payload['id']\n username = payload['username']\n # print(user_id,username)\n data_list = []\n for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(isOpen=1)):\n msgList = []\n msgList1 = []\n msgList2 = []\n receiver = item.receiverTea_id\n identity = 0\n if item.receiverStu_id != None:\n receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num\n identity = 1\n for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 
'new': item2.new,\n 'Ienter': 1 # 发送\n }\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverStu_id=user_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 2 # 接收\n }\n msgList2.append(data)\n # msgList.sort()\n # print(len(msgList1))\n else:\n for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 1 # 发送\n }\n msgList1.append(data)\n for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverStu_id=user_id)):\n data = {\n 'id': item2.id,\n 'message': item2.message,\n 'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),\n 'new': item2.new,\n 'Ienter': 2 # 接收\n }\n msgList2.append(data)\n # msgList.sort()\n len1 = len(msgList1)\n len2 = len(msgList2)\n i1 = 0\n i2 = 0\n for i in range(0,len1 + len2):\n if i1 >= len1:\n msgList.append(msgList2[i2])\n i2+=1\n elif i2 >= len2:\n msgList.append(msgList1[i1])\n i1+=1\n elif msgList1[i1]['time'] < msgList2[i2]['time']:\n msgList.append(msgList1[i1])\n i1+=1\n else:\n msgList.append(msgList2[i2])\n i2+=1\n\n # print(msgList)\n data = {\n 'id': item.id,\n 'receiver': receiver,\n 'msgList': msgList,\n 'name': receiver + str(identity),\n 'identity': identity\n }\n data_list.append(data)\n # print(data_list)\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data_list\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\nclass stuEnterPrivateLetterView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n username = payload['username']\n # print(user_id,username)\n receiver = request.data.get('receiver')\n message = request.data.get('message')\n identity = request.data.get('identity')\n\n if identity == 0:\n privateLetter = PrivateLetter(senderStu_id=user_id,receiverTea_id=receiver,message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver)&Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,receiverStu_id=user_id)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver).first().id\n privateLetter = PrivateLetter(senderStu_id=user_id,receiverStu_id=receiverStu_id,message=message)\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id)&Q(receiverStu_id=user_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id,receiverStu_id=user_id)\n privateLetter.save()\n chatBoxIsOpen.save()\n\n return Response({\n 'status': 200,\n 'msg': '发布私信成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 获取最近联系人\nclass stuRecentContactsView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n \n data_list = []\n for item in PrivateLetter.objects.filter(senderStu_id=user_id):\n if 
item.receiverTea_id != None and item.receiverTea_id != \"\":\n identity = 0\n receiver = item.receiverTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num\n data = {\n 'receiver': receiver,\n 'identity': identity #老师:0;学生:1\n }\n data_list.append(data)\n for item in PrivateLetter.objects.filter(receiverStu_id=user_id):\n if item.senderTea_id != None and item.senderTea_id != \"\":\n identity = 0\n receiver = item.senderTea_id\n else:\n identity = 1\n receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num\n data = {\n 'receiver': receiver,\n 'identity': identity #老师:0;学生:1\n }\n data_list.append(data)\n lenData = len(data_list)\n dict = {}\n data_list1 = []\n for i in range(lenData - 1,-1,-1):\n if (data_list[i]['receiver'] + str(data_list[i]['identity'])) not in dict:\n dict[data_list[i]['receiver'] + str(data_list[i]['identity'])] = '1'\n data_list1.append(data_list[i])\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data_list1\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 关闭聊天框\nclass stuCloseChatBoxView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = request.data.get('receiver')\n iden = request.data.get('iden')\n \n if iden == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()\n else:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=Student.objects.filter(stu_num=receiver).first().id)).first()\n chatBoxIsOpen.delete()\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 打开聊天框\nclass stuOpenChatBoxView(APIView):\n def post(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n\n user_id = payload['id']\n receiver = request.data.get('receiver')\n identity = request.data.get('identity')\n \n if identity == 0:\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,receiverTea_id=receiver)\n else:\n receiverStu_id = Student.objects.filter(stu_num=receiver).first().id\n chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()\n if not chatBoxIsOpen:\n chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,receiverStu_id=receiverStu_id)\n chatBoxIsOpen.save()\n\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n\n# 搜索联系人\nclass stuSearchContactView(APIView):\n def get(self, request, *args, **kwargs):\n try:\n try:\n payload = JwtQueryParamAuthentication.authenticate(self, request)[0]\n except Exception as e:\n return Response({\n 'status': 403,\n 'msg': '未登录',\n 'err': e.args\n })\n user_id = payload['id']\n username = payload['username']\n receiver = request.GET['receiver']\n identity = request.GET['identity']\n # print(receiver,identity=='0')\n\n # 
user = Teacher.objects.filter(id=username).first()\n # 0:教师,1:学生,2:还未搜索,3:自己,4:用户不存在\n iden = 4\n if identity == '1' and username == receiver:\n iden = 3\n elif identity == '0':\n user = Teacher.objects.filter(id=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 0\n else:\n user = Student.objects.filter(stu_num=receiver).first()\n if not user:\n iden = 4\n else:\n iden = 1\n data = {\n 'identity': iden,\n 'receiver': receiver\n }\n return Response({\n 'status': 200,\n 'msg': '返回成功',\n 'data': data\n })\n except Exception as e:\n return Response({\n 'status': 204,\n 'msg': '遇到了异常错误',\n 'err': e.args\n })\n",
"step-ids": [
14,
18,
21,
25,
26
]
}
|
[
14,
18,
21,
25,
26
] |
import pytest
from django.utils.crypto import get_random_string
from django.utils.timezone import now
from respa_exchange import listener
from respa_exchange.ews.xml import M, NAMESPACES, T
from respa_exchange.models import ExchangeResource
from respa_exchange.tests.session import SoapSeller
class SubscriptionHandler(object):
"""
SoapSeller handler for the streaming requests.
"""
def __init__(self, resource):
self.resource = resource
self.subscription_to_resource = {}
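        # subscription IDs handed out by handle_subscribe, mapped back to their resources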
def handle_subscribe(self, request):
if not request.xpath('//m:StreamingSubscriptionRequest', namespaces=NAMESPACES): # pragma: no cover
return
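        # the subscription request must target exactly our test resource's mailbox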
emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)
assert len(emails) == 1
assert emails[0].text == self.resource.principal_email
subscription_id = get_random_string(10)
self.subscription_to_resource[subscription_id] = self.resource
return M.SubscribeResponse(
M.ResponseMessages(
M.SubscribeResponseMessage(
M.ResponseCode('NoError'),
M.SubscriptionId(subscription_id),
ResponseClass='Success',
),
),
)
def _generate_event(self, type):
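        # build a minimal streaming-notification element (e.g. T.NewMailEvent) with random IDs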
return getattr(T, type)(
T.TimeStamp(now().isoformat()),
T.ItemId(
Id=get_random_string(),
ChangeKey=get_random_string(),
),
T.ParentFolderId(
Id=get_random_string(),
ChangeKey=get_random_string(),
),
)
def handle_get_events(self, request):
if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES): # pragma: no cover
return
sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0].text
# This would be a long-polling operation,
# but ain't nobody got time for that
return M.GetStreamingEventsResponse(
M.ResponseMessages(
M.GetStreamingEventsResponseMessage(
M.ResponseCode('NoError'),
M.Notifications(
M.Notification(
T.SubscriptionId(sub_id),
self._generate_event('NewMailEvent'),
),
),
ResponseClass='Success',
),
),
)
def handle_unsubscribe(self, request):
if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES): # pragma: no cover
return
subscription_id = request.xpath('//m:SubscriptionId', namespaces=NAMESPACES)[0].text
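        # forget the subscription; popping the same ID twice would raise KeyError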
self.subscription_to_resource.pop(subscription_id)
return M.UnsubscribeResponse(
M.ResponseMessages(
M.UnsubscribeResponseMessage(
M.ResponseCode('NoError'),
ResponseClass='Success',
),
),
)
@pytest.mark.django_db
def test_listener(settings, space_resource, exchange, monkeypatch):
email = '%[email protected]' % get_random_string()
ex_resource = ExchangeResource.objects.create(
resource=space_resource,
principal_email=email,
exchange=exchange,
sync_to_respa=True,
)
assert ex_resource.reservations.count() == 0
delegate = SubscriptionHandler(ex_resource)
SoapSeller.wire(settings, delegate)
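    # route outgoing EWS SOAP requests to the in-process SubscriptionHandler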
notification_listener = listener.NotificationListener()
synced_resources = [] # Keep track of the resources we get sync-request events for
def sync_resource(resource): # Our pretend sync handler
synced_resources.append(resource)
# Ask the listener to stop after we get a resource,
# so this test actually ends someday:
notification_listener.stop()
monkeypatch.setattr(listener, 'sync_from_exchange', sync_resource)
notification_listener.start()
# ... so when `sync_resource` is called, this'll eventually happen:
assert ex_resource in synced_resources
|
normal
|
{
"blob_id": "e4bfa0a55fe0dbb547bc5f65554ef96be654ec7a",
"index": 2176,
"step-1": "<mask token>\n\n\nclass SubscriptionHandler(object):\n <mask token>\n <mask token>\n\n def handle_subscribe(self, request):\n if not request.xpath('//m:StreamingSubscriptionRequest', namespaces\n =NAMESPACES):\n return\n emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)\n assert len(emails) == 1\n assert emails[0].text == self.resource.principal_email\n subscription_id = get_random_string(10)\n self.subscription_to_resource[subscription_id] = self.resource\n return M.SubscribeResponse(M.ResponseMessages(M.\n SubscribeResponseMessage(M.ResponseCode('NoError'), M.\n SubscriptionId(subscription_id), ResponseClass='Success')))\n <mask token>\n\n def handle_get_events(self, request):\n if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES):\n return\n sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0\n ].text\n return M.GetStreamingEventsResponse(M.ResponseMessages(M.\n GetStreamingEventsResponseMessage(M.ResponseCode('NoError'), M.\n Notifications(M.Notification(T.SubscriptionId(sub_id), self.\n _generate_event('NewMailEvent'))), ResponseClass='Success')))\n\n def handle_unsubscribe(self, request):\n if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES):\n return\n subscription_id = request.xpath('//m:SubscriptionId', namespaces=\n NAMESPACES)[0].text\n self.subscription_to_resource.pop(subscription_id)\n return M.UnsubscribeResponse(M.ResponseMessages(M.\n UnsubscribeResponseMessage(M.ResponseCode('NoError'),\n ResponseClass='Success')))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SubscriptionHandler(object):\n \"\"\"\n SoapSeller handler for the streaming requests.\n \"\"\"\n\n def __init__(self, resource):\n self.resource = resource\n self.subscription_to_resource = {}\n\n def handle_subscribe(self, request):\n if not request.xpath('//m:StreamingSubscriptionRequest', namespaces\n =NAMESPACES):\n return\n emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)\n assert len(emails) == 1\n assert emails[0].text == self.resource.principal_email\n subscription_id = get_random_string(10)\n self.subscription_to_resource[subscription_id] = self.resource\n return M.SubscribeResponse(M.ResponseMessages(M.\n SubscribeResponseMessage(M.ResponseCode('NoError'), M.\n SubscriptionId(subscription_id), ResponseClass='Success')))\n\n def _generate_event(self, type):\n return getattr(T, type)(T.TimeStamp(now().isoformat()), T.ItemId(Id\n =get_random_string(), ChangeKey=get_random_string()), T.\n ParentFolderId(Id=get_random_string(), ChangeKey=\n get_random_string()))\n\n def handle_get_events(self, request):\n if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES):\n return\n sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0\n ].text\n return M.GetStreamingEventsResponse(M.ResponseMessages(M.\n GetStreamingEventsResponseMessage(M.ResponseCode('NoError'), M.\n Notifications(M.Notification(T.SubscriptionId(sub_id), self.\n _generate_event('NewMailEvent'))), ResponseClass='Success')))\n\n def handle_unsubscribe(self, request):\n if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES):\n return\n subscription_id = request.xpath('//m:SubscriptionId', namespaces=\n NAMESPACES)[0].text\n self.subscription_to_resource.pop(subscription_id)\n return M.UnsubscribeResponse(M.ResponseMessages(M.\n UnsubscribeResponseMessage(M.ResponseCode('NoError'),\n ResponseClass='Success')))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SubscriptionHandler(object):\n \"\"\"\n SoapSeller handler for the streaming requests.\n \"\"\"\n\n def __init__(self, resource):\n self.resource = resource\n self.subscription_to_resource = {}\n\n def handle_subscribe(self, request):\n if not request.xpath('//m:StreamingSubscriptionRequest', namespaces\n =NAMESPACES):\n return\n emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)\n assert len(emails) == 1\n assert emails[0].text == self.resource.principal_email\n subscription_id = get_random_string(10)\n self.subscription_to_resource[subscription_id] = self.resource\n return M.SubscribeResponse(M.ResponseMessages(M.\n SubscribeResponseMessage(M.ResponseCode('NoError'), M.\n SubscriptionId(subscription_id), ResponseClass='Success')))\n\n def _generate_event(self, type):\n return getattr(T, type)(T.TimeStamp(now().isoformat()), T.ItemId(Id\n =get_random_string(), ChangeKey=get_random_string()), T.\n ParentFolderId(Id=get_random_string(), ChangeKey=\n get_random_string()))\n\n def handle_get_events(self, request):\n if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES):\n return\n sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0\n ].text\n return M.GetStreamingEventsResponse(M.ResponseMessages(M.\n GetStreamingEventsResponseMessage(M.ResponseCode('NoError'), M.\n Notifications(M.Notification(T.SubscriptionId(sub_id), self.\n _generate_event('NewMailEvent'))), ResponseClass='Success')))\n\n def handle_unsubscribe(self, request):\n if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES):\n return\n subscription_id = request.xpath('//m:SubscriptionId', namespaces=\n NAMESPACES)[0].text\n self.subscription_to_resource.pop(subscription_id)\n return M.UnsubscribeResponse(M.ResponseMessages(M.\n UnsubscribeResponseMessage(M.ResponseCode('NoError'),\n ResponseClass='Success')))\n\n\[email protected]_db\ndef test_listener(settings, space_resource, exchange, monkeypatch):\n email = '%[email protected]' % get_random_string()\n ex_resource = ExchangeResource.objects.create(resource=space_resource,\n principal_email=email, exchange=exchange, sync_to_respa=True)\n assert ex_resource.reservations.count() == 0\n delegate = SubscriptionHandler(ex_resource)\n SoapSeller.wire(settings, delegate)\n notification_listener = listener.NotificationListener()\n synced_resources = []\n\n def sync_resource(resource):\n synced_resources.append(resource)\n notification_listener.stop()\n monkeypatch.setattr(listener, 'sync_from_exchange', sync_resource)\n notification_listener.start()\n assert ex_resource in synced_resources\n",
"step-4": "import pytest\nfrom django.utils.crypto import get_random_string\nfrom django.utils.timezone import now\nfrom respa_exchange import listener\nfrom respa_exchange.ews.xml import M, NAMESPACES, T\nfrom respa_exchange.models import ExchangeResource\nfrom respa_exchange.tests.session import SoapSeller\n\n\nclass SubscriptionHandler(object):\n \"\"\"\n SoapSeller handler for the streaming requests.\n \"\"\"\n\n def __init__(self, resource):\n self.resource = resource\n self.subscription_to_resource = {}\n\n def handle_subscribe(self, request):\n if not request.xpath('//m:StreamingSubscriptionRequest', namespaces\n =NAMESPACES):\n return\n emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)\n assert len(emails) == 1\n assert emails[0].text == self.resource.principal_email\n subscription_id = get_random_string(10)\n self.subscription_to_resource[subscription_id] = self.resource\n return M.SubscribeResponse(M.ResponseMessages(M.\n SubscribeResponseMessage(M.ResponseCode('NoError'), M.\n SubscriptionId(subscription_id), ResponseClass='Success')))\n\n def _generate_event(self, type):\n return getattr(T, type)(T.TimeStamp(now().isoformat()), T.ItemId(Id\n =get_random_string(), ChangeKey=get_random_string()), T.\n ParentFolderId(Id=get_random_string(), ChangeKey=\n get_random_string()))\n\n def handle_get_events(self, request):\n if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES):\n return\n sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0\n ].text\n return M.GetStreamingEventsResponse(M.ResponseMessages(M.\n GetStreamingEventsResponseMessage(M.ResponseCode('NoError'), M.\n Notifications(M.Notification(T.SubscriptionId(sub_id), self.\n _generate_event('NewMailEvent'))), ResponseClass='Success')))\n\n def handle_unsubscribe(self, request):\n if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES):\n return\n subscription_id = request.xpath('//m:SubscriptionId', namespaces=\n NAMESPACES)[0].text\n self.subscription_to_resource.pop(subscription_id)\n return M.UnsubscribeResponse(M.ResponseMessages(M.\n UnsubscribeResponseMessage(M.ResponseCode('NoError'),\n ResponseClass='Success')))\n\n\[email protected]_db\ndef test_listener(settings, space_resource, exchange, monkeypatch):\n email = '%[email protected]' % get_random_string()\n ex_resource = ExchangeResource.objects.create(resource=space_resource,\n principal_email=email, exchange=exchange, sync_to_respa=True)\n assert ex_resource.reservations.count() == 0\n delegate = SubscriptionHandler(ex_resource)\n SoapSeller.wire(settings, delegate)\n notification_listener = listener.NotificationListener()\n synced_resources = []\n\n def sync_resource(resource):\n synced_resources.append(resource)\n notification_listener.stop()\n monkeypatch.setattr(listener, 'sync_from_exchange', sync_resource)\n notification_listener.start()\n assert ex_resource in synced_resources\n",
"step-5": "import pytest\nfrom django.utils.crypto import get_random_string\nfrom django.utils.timezone import now\n\nfrom respa_exchange import listener\nfrom respa_exchange.ews.xml import M, NAMESPACES, T\nfrom respa_exchange.models import ExchangeResource\nfrom respa_exchange.tests.session import SoapSeller\n\n\nclass SubscriptionHandler(object):\n \"\"\"\n SoapSeller handler for the streaming requests.\n \"\"\"\n\n def __init__(self, resource):\n self.resource = resource\n self.subscription_to_resource = {}\n\n def handle_subscribe(self, request):\n if not request.xpath('//m:StreamingSubscriptionRequest', namespaces=NAMESPACES): # pragma: no cover\n return\n emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)\n assert len(emails) == 1\n assert emails[0].text == self.resource.principal_email\n subscription_id = get_random_string(10)\n self.subscription_to_resource[subscription_id] = self.resource\n return M.SubscribeResponse(\n M.ResponseMessages(\n M.SubscribeResponseMessage(\n M.ResponseCode('NoError'),\n M.SubscriptionId(subscription_id),\n ResponseClass='Success',\n ),\n ),\n )\n\n def _generate_event(self, type):\n return getattr(T, type)(\n T.TimeStamp(now().isoformat()),\n T.ItemId(\n Id=get_random_string(),\n ChangeKey=get_random_string(),\n ),\n T.ParentFolderId(\n Id=get_random_string(),\n ChangeKey=get_random_string(),\n ),\n )\n\n def handle_get_events(self, request):\n if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES): # pragma: no cover\n return\n sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0].text\n # This would be a long-polling operation,\n # but ain't nobody got time for that\n return M.GetStreamingEventsResponse(\n M.ResponseMessages(\n M.GetStreamingEventsResponseMessage(\n M.ResponseCode('NoError'),\n M.Notifications(\n M.Notification(\n T.SubscriptionId(sub_id),\n self._generate_event('NewMailEvent'),\n ),\n ),\n ResponseClass='Success',\n ),\n ),\n )\n\n def handle_unsubscribe(self, request):\n if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES): # pragma: no cover\n return\n subscription_id = request.xpath('//m:SubscriptionId', namespaces=NAMESPACES)[0].text\n self.subscription_to_resource.pop(subscription_id)\n return M.UnsubscribeResponse(\n M.ResponseMessages(\n M.UnsubscribeResponseMessage(\n M.ResponseCode('NoError'),\n ResponseClass='Success',\n ),\n ),\n )\n\n\[email protected]_db\ndef test_listener(settings, space_resource, exchange, monkeypatch):\n email = '%[email protected]' % get_random_string()\n ex_resource = ExchangeResource.objects.create(\n resource=space_resource,\n principal_email=email,\n exchange=exchange,\n sync_to_respa=True,\n )\n assert ex_resource.reservations.count() == 0\n delegate = SubscriptionHandler(ex_resource)\n SoapSeller.wire(settings, delegate)\n\n notification_listener = listener.NotificationListener()\n\n synced_resources = [] # Keep track of the resources we get sync-request events for\n\n def sync_resource(resource): # Our pretend sync handler\n synced_resources.append(resource)\n # Ask the listener to stop after we get a resource,\n # so this test actually ends someday:\n notification_listener.stop()\n\n monkeypatch.setattr(listener, 'sync_from_exchange', sync_resource)\n notification_listener.start()\n # ... so when `sync_resource` is called, this'll eventually happen:\n assert ex_resource in synced_resources\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
## PURPOSE: get reads for certain motifs across certain tumors
## INPUT: manifest data all-tumor-manifest.csv
## collapsed fastq files sample.converted.unpaired.fastq.collapsed
## OUTPUT: table containing reads for specific motif across samples motif.tumor.common.reads.fastq.collapsed.summary.tsv
import os
import os.path
import numpy as np
import pandas as pd
import collections
import subprocess
from pathlib import Path
import time
base_path = '/media/user/2TB (MAC)/Susanna/'
collapsed_ext = '.converted.unpaired.fastq.collapsed'
manifest_file = base_path + 'all-tumor-manifest.csv'
manifest_data = pd.read_csv(manifest_file, header='infer', sep=',')
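# disabled one-off snippet that appended the TARGET manifest into the combined file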
'''
file = base_path + 'TARGET/TARGET-manifest.csv'
data = pd.read_csv(file, header='infer', sep=',')
data['DISEASE.ABBV'] = 'TARGET'
manifest_data = pd.concat([manifest_data, data])
print(manifest_data.shape)
manifest_data.to_csv(manifest_file, sep=',', index=False)
'''
def getCollapsedFastqDataframe(file):
    # Parse a collapsed fastq file into a two-column dataframe of read counts and sequences.
    df = pd.read_table(file, header=None, delim_whitespace=True)
    df = df.dropna(axis=1, how='all')
    df.columns = ['READS', 'SEQUENCE']
    return df
def getManifestID(name, tumor):
    # Look up the manifest ID for a given sample name within a tumor type.
    matched = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (manifest_data['NAME'] == name)]['ID']
    return str(matched.tolist()[0])
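# Usage sketch (sample name and returned ID here are hypothetical):
#   getManifestID('sampleA', 'LAML')  # -> '42'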
motifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']
mirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']
i = 1
motif = motifs[i]
mirna = mirnas[i]
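# i selects which motif/miRNA pair to process; i = 1 -> TTATCAGACTGAT / hsa-miR-21-5p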
for subdir, dirs, files in os.walk(base_path):
if '/collapsed_fastq' in subdir:
folders = subdir.split('/')
tumor = folders[len(folders)-2]
if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:
continue
print(tumor)
summary_file = base_path + 'motif_reads/' + mirna + '/' + motif + '.' + tumor + '.common.reads.fastq.collapsed.summary.tsv'
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE':[]})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
total_time_start = time.time()
for f in os.listdir(subdir):
time_start = time.time()
			if f[0] == '.':
				continue  # skip hidden files instead of aborting the whole directory listing
patient = f.split('.')[0]
id = getManifestID(patient, tumor)
if id not in matched_ids:
matched_ids.append(id)
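				# Re-read the on-disk summary so this sample merges against the latest saved state.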
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
else:
print('SUMMARY FILE NOT FOUND')
summary_data = pd.DataFrame({'SEQUENCE':[]})
matched_ids = list(summary_data)
common_seqs = list(summary_data['SEQUENCE'])
#matched_seq = list(summary_data['SEQUENCE'])
summary_data = None
collapsed_file = subdir+'/'+f
collapsed_data = getCollapsedFastqDataframe(collapsed_file)
#print(collapsed_data.shape[0])
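				# Running intersection: keep only sequences already present in every merged sample.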
if len(common_seqs) > 0:
collapsed_data = collapsed_data[collapsed_data.SEQUENCE.isin(common_seqs)]
num_rows = collapsed_data.shape[0]
#print(collapsed_data.shape[0])
				collapsed_data.columns = [str(id), 'SEQUENCE']
				match_collapsed_data = collapsed_data
if Path(summary_file).exists():
summary_data = pd.read_table(summary_file, header='infer', sep='\t')
summary_data = pd.merge(summary_data, match_collapsed_data, on='SEQUENCE', sort=False, how='inner')
else:
summary_data = match_collapsed_data
summary_data.to_csv(summary_file, sep='\t', index=False)
summary_data = pd.DataFrame({'SEQUENCE':[]})
time_end = time.time()
#print('TUMOR: ' + tumor + ' SAMPLE: ' + str(patient) + ' TOTAL TIME: ' + str((time_end-time_start)/60) + ' ROWS: ' + str(num_rows))
		summary_data = pd.read_table(summary_file, header='infer', sep='\t')
		# Drop any surviving rows whose sequence does not actually contain the motif.
		match_summary_data = summary_data[summary_data['SEQUENCE'].astype(str).str.contains(motif, regex=False)]
		match_summary_data.to_csv(summary_file, sep='\t', index=False)
total_time_end = time.time()
		print('TOTAL TUMOR TIME: ' + str(total_time_end - total_time_start))
|
normal
|
{
"blob_id": "ddabceb223f4e457a0f69af5abf793ae72e5f432",
"index": 1465,
"step-1": "<mask token>\n\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample) - 1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df\n\n\ndef getManifestID(name, tumor):\n id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (\n manifest_data['NAME'] == name)]['ID']\n id = id.tolist()[0]\n id = str(id)\n return str(id)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample) - 1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df\n\n\ndef getManifestID(name, tumor):\n id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (\n manifest_data['NAME'] == name)]['ID']\n id = id.tolist()[0]\n id = str(id)\n return str(id)\n\n\n<mask token>\nfor subdir, dirs, files in os.walk(base_path):\n if '/collapsed_fastq' in subdir:\n folders = subdir.split('/')\n tumor = folders[len(folders) - 2]\n if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:\n continue\n print(tumor)\n summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +\n '.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t'\n )\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n total_time_start = time.time()\n for f in os.listdir(subdir):\n time_start = time.time()\n if f[0] == '.':\n break\n patient = f.split('.')[0]\n id = getManifestID(patient, tumor)\n if id not in matched_ids:\n matched_ids.append(id)\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n summary_data = None\n collapsed_file = subdir + '/' + f\n collapsed_data = getCollapsedFastqDataframe(collapsed_file)\n if len(common_seqs) > 0:\n collapsed_data = collapsed_data[collapsed_data.SEQUENCE\n .isin(common_seqs)]\n num_rows = collapsed_data.shape[0]\n collapsed_data.columns = [str(id), 'SEQUENCE']\n match_collapsed_data = collapsed_data\n match_collapsed_data.columns = [str(id), 'SEQUENCE']\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n summary_data = pd.merge(summary_data,\n match_collapsed_data, on='SEQUENCE', sort=False,\n how='inner')\n else:\n summary_data = match_collapsed_data\n summary_data.to_csv(summary_file, sep='\\t', index=False)\n summary_data = pd.DataFrame({'SEQUENCE': []})\n time_end = time.time()\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n match_summary_data = summary_data.copy()\n for index, row in summary_data.iterrows():\n sequence = str(row['SEQUENCE'])\n if motif not in sequence:\n match_summary_data = match_summary_data[match_summary_data.\n SEQUENCE != sequence]\n match_summary_data.to_csv(summary_file, sep='\\t', index=False)\n total_time_end = time.time()\n print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))\n",
"step-3": "<mask token>\nbase_path = '/media/user/2TB (MAC)/Susanna/'\ncollapsed_ext = '.converted.unpaired.fastq.collapsed'\nmanifest_file = base_path + 'all-tumor-manifest.csv'\nmanifest_data = pd.read_csv(manifest_file, header='infer', sep=',')\n<mask token>\n\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample) - 1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df\n\n\ndef getManifestID(name, tumor):\n id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (\n manifest_data['NAME'] == name)]['ID']\n id = id.tolist()[0]\n id = str(id)\n return str(id)\n\n\nmotifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']\nmirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']\ni = 1\nmotif = motifs[i]\nmirna = mirnas[i]\nfor subdir, dirs, files in os.walk(base_path):\n if '/collapsed_fastq' in subdir:\n folders = subdir.split('/')\n tumor = folders[len(folders) - 2]\n if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:\n continue\n print(tumor)\n summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +\n '.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t'\n )\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n total_time_start = time.time()\n for f in os.listdir(subdir):\n time_start = time.time()\n if f[0] == '.':\n break\n patient = f.split('.')[0]\n id = getManifestID(patient, tumor)\n if id not in matched_ids:\n matched_ids.append(id)\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n summary_data = None\n collapsed_file = subdir + '/' + f\n collapsed_data = getCollapsedFastqDataframe(collapsed_file)\n if len(common_seqs) > 0:\n collapsed_data = collapsed_data[collapsed_data.SEQUENCE\n .isin(common_seqs)]\n num_rows = collapsed_data.shape[0]\n collapsed_data.columns = [str(id), 'SEQUENCE']\n match_collapsed_data = collapsed_data\n match_collapsed_data.columns = [str(id), 'SEQUENCE']\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n summary_data = pd.merge(summary_data,\n match_collapsed_data, on='SEQUENCE', sort=False,\n how='inner')\n else:\n summary_data = match_collapsed_data\n summary_data.to_csv(summary_file, sep='\\t', index=False)\n summary_data = pd.DataFrame({'SEQUENCE': []})\n time_end = time.time()\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n match_summary_data = summary_data.copy()\n for index, row in summary_data.iterrows():\n sequence = str(row['SEQUENCE'])\n if motif not in sequence:\n match_summary_data = match_summary_data[match_summary_data.\n SEQUENCE != sequence]\n match_summary_data.to_csv(summary_file, sep='\\t', index=False)\n total_time_end = time.time()\n print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))\n",
"step-4": "import os\nimport os.path\nimport numpy as np\nimport pandas as pd\nimport collections\nimport subprocess\nfrom pathlib import Path\nimport time\nbase_path = '/media/user/2TB (MAC)/Susanna/'\ncollapsed_ext = '.converted.unpaired.fastq.collapsed'\nmanifest_file = base_path + 'all-tumor-manifest.csv'\nmanifest_data = pd.read_csv(manifest_file, header='infer', sep=',')\n<mask token>\n\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample) - 1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df\n\n\ndef getManifestID(name, tumor):\n id = manifest_data.loc[(manifest_data['DISEASE.ABBV'] == tumor) & (\n manifest_data['NAME'] == name)]['ID']\n id = id.tolist()[0]\n id = str(id)\n return str(id)\n\n\nmotifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']\nmirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']\ni = 1\nmotif = motifs[i]\nmirna = mirnas[i]\nfor subdir, dirs, files in os.walk(base_path):\n if '/collapsed_fastq' in subdir:\n folders = subdir.split('/')\n tumor = folders[len(folders) - 2]\n if tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:\n continue\n print(tumor)\n summary_file = (base_path + 'motif_reads/' + mirna + '/' + motif +\n '.' + tumor + '.common.reads.fastq.collapsed.summary.tsv')\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t'\n )\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n total_time_start = time.time()\n for f in os.listdir(subdir):\n time_start = time.time()\n if f[0] == '.':\n break\n patient = f.split('.')[0]\n id = getManifestID(patient, tumor)\n if id not in matched_ids:\n matched_ids.append(id)\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n else:\n print('SUMMARY FILE NOT FOUND')\n summary_data = pd.DataFrame({'SEQUENCE': []})\n matched_ids = list(summary_data)\n common_seqs = list(summary_data['SEQUENCE'])\n summary_data = None\n collapsed_file = subdir + '/' + f\n collapsed_data = getCollapsedFastqDataframe(collapsed_file)\n if len(common_seqs) > 0:\n collapsed_data = collapsed_data[collapsed_data.SEQUENCE\n .isin(common_seqs)]\n num_rows = collapsed_data.shape[0]\n collapsed_data.columns = [str(id), 'SEQUENCE']\n match_collapsed_data = collapsed_data\n match_collapsed_data.columns = [str(id), 'SEQUENCE']\n if Path(summary_file).exists():\n summary_data = pd.read_table(summary_file, header=\n 'infer', sep='\\t')\n summary_data = pd.merge(summary_data,\n match_collapsed_data, on='SEQUENCE', sort=False,\n how='inner')\n else:\n summary_data = match_collapsed_data\n summary_data.to_csv(summary_file, sep='\\t', index=False)\n summary_data = pd.DataFrame({'SEQUENCE': []})\n time_end = time.time()\n summary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n match_summary_data = summary_data.copy()\n for index, row in summary_data.iterrows():\n sequence = str(row['SEQUENCE'])\n if motif not in sequence:\n match_summary_data = match_summary_data[match_summary_data.\n SEQUENCE != sequence]\n match_summary_data.to_csv(summary_file, sep='\\t', index=False)\n total_time_end = time.time()\n print('TOTAl TUMOR TIME: ' + str(total_time_end - total_time_start))\n",
"step-5": "## PURPOSE: get reads for certain motifs across certain tumors\n## INPUT: manifest data \t\t\t\t\t\t\t all-tumor-manifest.csv\n## \t\t collapsed fastq files \tsample.converted.unpaired.fastq.collapsed\n## OUTPUT: table containing reads for specific motif across samples \tmotif.tumor.common.reads.fastq.collapsed.summary.tsv\nimport os\nimport os.path\nimport numpy as np\nimport pandas as pd\nimport collections\nimport subprocess\nfrom pathlib import Path\nimport time\n\nbase_path = '/media/user/2TB (MAC)/Susanna/'\ncollapsed_ext = '.converted.unpaired.fastq.collapsed'\n\nmanifest_file = base_path + 'all-tumor-manifest.csv'\nmanifest_data =pd.read_csv(manifest_file, header='infer', sep=',')\n'''\nfile = base_path + 'TARGET/TARGET-manifest.csv'\ndata = pd.read_csv(file, header='infer', sep=',')\ndata['DISEASE.ABBV'] = 'TARGET'\n\nmanifest_data = pd.concat([manifest_data, data])\nprint(manifest_data.shape)\nmanifest_data.to_csv(manifest_file, sep=',', index=False)\n'''\n\ndef getCollapsedFastqDataframe(file):\n df = pd.read_table(file, header=None, delim_whitespace=True)\n df = df.dropna(axis=1, how='all')\n sample = file.split('/')\n sample = sample[len(sample)-1]\n sample = sample.split('.')[0]\n df.columns = ['READS', 'SEQUENCE']\n return df \n\ndef getManifestID(name, tumor):\n\tid = manifest_data.loc[(manifest_data['DISEASE.ABBV']==tumor) & (manifest_data['NAME']==name)]['ID']\n\tid = id.tolist()[0]\n\tid = str(id)\n\treturn str(id)\n\n\nmotifs = ['TGGTTATCTAGCT', 'TTATCAGACTGAT']\nmirnas = ['hsa-miR-9-5p-1-2-3', 'hsa-miR-21-5p']\n\ni = 1\nmotif = motifs[i]\nmirna = mirnas[i]\n\n\nfor subdir, dirs, files in os.walk(base_path):\n\tif '/collapsed_fastq' in subdir:\n\n\t\tfolders = subdir.split('/')\n\t\ttumor = folders[len(folders)-2]\n\t\tif tumor in ['THCA', 'STAD', 'SKCM', 'PCPG']:\n\t\t\tcontinue\n\t\tprint(tumor)\n\t\t\n\t\tsummary_file = base_path + 'motif_reads/' + mirna + '/' + motif + '.' 
+ tumor + '.common.reads.fastq.collapsed.summary.tsv'\n\t\tif Path(summary_file).exists():\n\t\t\tsummary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n\t\telse:\n\t\t\tprint('SUMMARY FILE NOT FOUND')\n\t\t\tsummary_data = pd.DataFrame({'SEQUENCE':[]})\n\t\tmatched_ids = list(summary_data)\n\t\tcommon_seqs = list(summary_data['SEQUENCE'])\n\n\t\ttotal_time_start = time.time()\n\n\t\tfor f in os.listdir(subdir):\n\n\t\t\ttime_start = time.time()\n\t\t\tif f[0] == '.':\n\t\t\t\tbreak\n\n\t\t\tpatient = f.split('.')[0]\n\t\t\tid = getManifestID(patient, tumor)\n\t\t\tif id not in matched_ids:\n\n\t\t\t\tmatched_ids.append(id)\n\t\t\t\t\n\t\t\t\tif Path(summary_file).exists():\n\t\t\t\t\tsummary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n\t\t\t\telse:\n\t\t\t\t\tprint('SUMMARY FILE NOT FOUND')\n\t\t\t\t\tsummary_data = pd.DataFrame({'SEQUENCE':[]})\n\t\t\t\tmatched_ids = list(summary_data)\n\t\t\t\tcommon_seqs = list(summary_data['SEQUENCE'])\n\t\t\t\t#matched_seq = list(summary_data['SEQUENCE'])\n\t\t\t\tsummary_data = None\n\n\t\t\t\tcollapsed_file = subdir+'/'+f\t\t\n\t\t\t\tcollapsed_data = getCollapsedFastqDataframe(collapsed_file)\n\t\t\t\t#print(collapsed_data.shape[0])\n\t\t\t\tif len(common_seqs) > 0:\n\t\t\t\t\tcollapsed_data = collapsed_data[collapsed_data.SEQUENCE.isin(common_seqs)]\n\t\t\t\tnum_rows = collapsed_data.shape[0]\n\t\t\t\t#print(collapsed_data.shape[0])\n\t\t\t\tcollapsed_data.columns = [str(id), 'SEQUENCE']\n\t\t\t\tmatch_collapsed_data = collapsed_data #pd.DataFrame(columns = ['READS', 'SEQUENCE'])\n\n\t\t\t\tmatch_collapsed_data.columns = [str(id), 'SEQUENCE']\n\n\t\t\t\tif Path(summary_file).exists():\n\t\t\t\t\tsummary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n\t\t\t\t\tsummary_data = pd.merge(summary_data, match_collapsed_data, on='SEQUENCE', sort=False, how='inner')\n\t\t\t\telse:\n\t\t\t\t\tsummary_data = match_collapsed_data\n\n\n\t\t\t\tsummary_data.to_csv(summary_file, sep='\\t', index=False) \n\t\t\t\tsummary_data = pd.DataFrame({'SEQUENCE':[]})\n\n\t\t\t\ttime_end = time.time()\n\n\t\t\t\t#print('TUMOR: ' + tumor + ' SAMPLE: ' + str(patient) + ' TOTAL TIME: ' + str((time_end-time_start)/60) + ' ROWS: ' + str(num_rows))\n\t\t\n\t\tsummary_data = pd.read_table(summary_file, header='infer', sep='\\t')\n\t\tmatch_summary_data = summary_data.copy()\n\t\tfor index, row in summary_data.iterrows():\n\t\t\tsequence = str(row['SEQUENCE'])\n\t\t\tif motif not in sequence:\n\t\t\t\tmatch_summary_data = match_summary_data[match_summary_data.SEQUENCE != sequence]\n\t\tmatch_summary_data.to_csv(summary_file, sep='\\t', index=False) \n\t\ttotal_time_end = time.time()\n\t\tprint('TOTAl TUMOR TIME: ' + str(total_time_end-total_time_start)) \n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |